title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST: dont skip test_replace_series | diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index e3ad3f733a302..256aaef8eb5a7 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -986,10 +986,6 @@ class TestReplaceSeriesCoercion(CoercionBase):
],
)
def test_replace_series(self, how, to_key, from_key):
- if from_key == "bool" and how == "series":
- # doesn't work in PY3, though ...dict_from_bool works fine
- pytest.skip("doesn't work as in PY3")
-
index = pd.Index([3, 4], name="xxx")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
| Passes locally, we'll see about the CI | https://api.github.com/repos/pandas-dev/pandas/pulls/30443 | 2019-12-24T04:31:26Z | 2019-12-24T13:02:00Z | 2019-12-24T13:02:00Z | 2019-12-24T16:47:33Z |
CLN .format to f-strings for several files | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 2208fbf933387..e8b6491c5026c 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -42,10 +42,8 @@
engine,
marks=pytest.mark.skipif(
engine == "numexpr" and not _USE_NUMEXPR,
- reason="numexpr enabled->{enabled}, "
- "installed->{installed}".format(
- enabled=_USE_NUMEXPR, installed=_NUMEXPR_INSTALLED
- ),
+ reason=f"numexpr enabled->{_USE_NUMEXPR}, "
+ f"installed->{_NUMEXPR_INSTALLED}",
),
)
for engine in _engines
@@ -189,9 +187,7 @@ def test_complex_cmp_ops(self, cmp1, cmp2):
rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)
expected = _eval_single_bin(lhs_new, binop, rhs_new, self.engine)
- ex = "(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)".format(
- cmp1=cmp1, binop=binop, cmp2=cmp2
- )
+ ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
self.check_equal(result, expected)
@@ -265,9 +261,9 @@ def check_operands(left, right, cmp_op):
rhs_new = check_operands(mid, rhs, cmp2)
if lhs_new is not None and rhs_new is not None:
- ex1 = "lhs {0} mid {1} rhs".format(cmp1, cmp2)
- ex2 = "lhs {0} mid and mid {1} rhs".format(cmp1, cmp2)
- ex3 = "(lhs {0} mid) & (mid {1} rhs)".format(cmp1, cmp2)
+ ex1 = f"lhs {cmp1} mid {cmp2} rhs"
+ ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs"
+ ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)"
expected = _eval_single_bin(lhs_new, "&", rhs_new, self.engine)
for ex in (ex1, ex2, ex3):
@@ -276,7 +272,7 @@ def check_operands(left, right, cmp_op):
tm.assert_almost_equal(result, expected)
def check_simple_cmp_op(self, lhs, cmp1, rhs):
- ex = "lhs {0} rhs".format(cmp1)
+ ex = f"lhs {cmp1} rhs"
msg = (
r"only list-like( or dict-like)? objects are allowed to be"
r" passed to (DataFrame\.)?isin\(\), you passed a"
@@ -297,12 +293,12 @@ def check_simple_cmp_op(self, lhs, cmp1, rhs):
self.check_equal(result, expected)
def check_binary_arith_op(self, lhs, arith1, rhs):
- ex = "lhs {0} rhs".format(arith1)
+ ex = f"lhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = _eval_single_bin(lhs, arith1, rhs, self.engine)
tm.assert_almost_equal(result, expected)
- ex = "lhs {0} rhs {0} rhs".format(arith1)
+ ex = f"lhs {arith1} rhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
nlhs = _eval_single_bin(lhs, arith1, rhs, self.engine)
self.check_alignment(result, nlhs, rhs, arith1)
@@ -317,25 +313,25 @@ def check_alignment(self, result, nlhs, ghs, op):
else:
# direct numpy comparison
- expected = self.ne.evaluate("nlhs {0} ghs".format(op))
+ expected = self.ne.evaluate(f"nlhs {op} ghs")
tm.assert_numpy_array_equal(result.values, expected)
# modulus, pow, and floor division require special casing
def check_modulus(self, lhs, arith1, rhs):
- ex = "lhs {0} rhs".format(arith1)
+ ex = f"lhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs % rhs
tm.assert_almost_equal(result, expected)
- expected = self.ne.evaluate("expected {0} rhs".format(arith1))
+ expected = self.ne.evaluate(f"expected {arith1} rhs")
if isinstance(result, (DataFrame, Series)):
tm.assert_almost_equal(result.values, expected)
else:
tm.assert_almost_equal(result, expected.item())
def check_floor_division(self, lhs, arith1, rhs):
- ex = "lhs {0} rhs".format(arith1)
+ ex = f"lhs {arith1} rhs"
if self.engine == "python":
res = pd.eval(ex, engine=self.engine, parser=self.parser)
@@ -370,7 +366,7 @@ def get_expected_pow_result(self, lhs, rhs):
return expected
def check_pow(self, lhs, arith1, rhs):
- ex = "lhs {0} rhs".format(arith1)
+ ex = f"lhs {arith1} rhs"
expected = self.get_expected_pow_result(lhs, rhs)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
@@ -384,7 +380,7 @@ def check_pow(self, lhs, arith1, rhs):
else:
tm.assert_almost_equal(result, expected)
- ex = "(lhs {0} rhs) {0} rhs".format(arith1)
+ ex = f"(lhs {arith1} rhs) {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = self.get_expected_pow_result(
self.get_expected_pow_result(lhs, rhs), rhs
@@ -409,7 +405,7 @@ def check_single_invert_op(self, lhs, cmp1, rhs):
def check_compound_invert_op(self, lhs, cmp1, rhs):
skip_these = ["in", "not in"]
- ex = "~(lhs {0} rhs)".format(cmp1)
+ ex = f"~(lhs {cmp1} rhs)"
msg = (
r"only list-like( or dict-like)? objects are allowed to be"
@@ -443,7 +439,7 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
tm.assert_almost_equal(ev, result)
def ex(self, op, var_name="lhs"):
- return "{0}{1}".format(op, var_name)
+ return f"{op}{var_name}"
def test_frame_invert(self):
expr = self.ex("~")
@@ -733,16 +729,16 @@ def test_float_truncation(self):
df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
cutoff = 1000000000.0006
- result = df.query("A < {cutoff:.4f}".format(cutoff=cutoff))
+ result = df.query(f"A < {cutoff:.4f}")
assert result.empty
cutoff = 1000000000.0010
- result = df.query("A > {cutoff:.4f}".format(cutoff=cutoff))
+ result = df.query(f"A > {cutoff:.4f}")
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
- result = df.query("A == {exact:.4f}".format(exact=exact))
+ result = df.query(f"A == {exact:.4f}")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
@@ -781,7 +777,7 @@ def setup_ops(self):
self.unary_ops = "+", "-", "~"
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
- ex1 = "lhs {0} mid {1} rhs".format(cmp1, cmp2)
+ ex1 = f"lhs {cmp1} mid {cmp2} rhs"
with pytest.raises(NotImplementedError):
pd.eval(ex1, engine=self.engine, parser=self.parser)
@@ -794,7 +790,7 @@ def setup_class(cls):
cls.parser = "python"
def check_modulus(self, lhs, arith1, rhs):
- ex = "lhs {0} rhs".format(arith1)
+ ex = f"lhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs % rhs
@@ -811,7 +807,7 @@ def check_alignment(self, result, nlhs, ghs, op):
# TypeError, AttributeError: series or frame with scalar align
pass
else:
- expected = eval("nlhs {0} ghs".format(op))
+ expected = eval(f"nlhs {op} ghs")
tm.assert_almost_equal(result, expected)
@@ -840,13 +836,13 @@ class TestTypeCasting:
@pytest.mark.parametrize("dt", [np.float32, np.float64])
def test_binop_typecasting(self, engine, parser, op, dt):
df = tm.makeCustomDataframe(5, 3, data_gen_f=f, dtype=dt)
- s = "df {} 3".format(op)
+ s = f"df {op} 3"
res = pd.eval(s, engine=engine, parser=parser)
assert df.values.dtype == dt
assert res.values.dtype == dt
tm.assert_frame_equal(res, eval(s))
- s = "3 {} df".format(op)
+ s = f"3 {op} df"
res = pd.eval(s, engine=engine, parser=parser)
assert df.values.dtype == dt
assert res.values.dtype == dt
@@ -1013,8 +1009,8 @@ def test_series_frame_commutativity(self, engine, parser):
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
- lhs = "s {0} df".format(op)
- rhs = "df {0} s".format(op)
+ lhs = f"s {op} df"
+ rhs = f"df {op} s"
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
a = pd.eval(lhs, engine=engine, parser=parser)
@@ -1149,9 +1145,9 @@ def test_simple_arith_ops(self):
ops = self.arith_ops
for op in filter(lambda x: x != "//", ops):
- ex = "1 {0} 1".format(op)
- ex2 = "x {0} 1".format(op)
- ex3 = "1 {0} (x + 1)".format(op)
+ ex = f"1 {op} 1"
+ ex2 = f"x {op} 1"
+ ex3 = f"1 {op} (x + 1)"
if op in ("in", "not in"):
msg = "argument of type 'int' is not iterable"
@@ -1176,7 +1172,7 @@ def test_simple_arith_ops(self):
def test_simple_bool_ops(self):
for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), (True, False)):
- ex = "{0} {1} {2}".format(lhs, op, rhs)
+ ex = f"{lhs} {op} {rhs}"
res = self.eval(ex)
exp = eval(ex)
assert res == exp
@@ -1185,7 +1181,7 @@ def test_bool_ops_with_constants(self):
for op, lhs, rhs in product(
expr._bool_ops_syms, ("True", "False"), ("True", "False")
):
- ex = "{0} {1} {2}".format(lhs, op, rhs)
+ ex = f"{lhs} {op} {rhs}"
res = self.eval(ex)
exp = eval(ex)
assert res == exp
@@ -1679,7 +1675,7 @@ def test_bool_ops_with_constants(self):
for op, lhs, rhs in product(
expr._bool_ops_syms, ("True", "False"), ("True", "False")
):
- ex = "{0} {1} {2}".format(lhs, op, rhs)
+ ex = f"{lhs} {op} {rhs}"
if op in ("and", "or"):
with pytest.raises(NotImplementedError):
self.eval(ex)
@@ -1690,7 +1686,7 @@ def test_bool_ops_with_constants(self):
def test_simple_bool_ops(self):
for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), (True, False)):
- ex = "lhs {0} rhs".format(op)
+ ex = f"lhs {op} rhs"
if op in ("and", "or"):
with pytest.raises(NotImplementedError):
pd.eval(ex, engine=self.engine, parser=self.parser)
@@ -1742,7 +1738,7 @@ def test_unary_functions(self, unary_fns_for_ne):
a = df.a
for fn in unary_fns_for_ne:
- expr = "{0}(a)".format(fn)
+ expr = f"{fn}(a)"
got = self.eval(expr)
with np.errstate(all="ignore"):
expect = getattr(np, fn)(a)
@@ -1750,9 +1746,9 @@ def test_unary_functions(self, unary_fns_for_ne):
def test_floor_and_ceil_functions_raise_error(self, ne_lt_2_6_9, unary_fns_for_ne):
for fn in ("floor", "ceil"):
- msg = '"{0}" is not a supported function'.format(fn)
+ msg = f'"{fn}" is not a supported function'
with pytest.raises(ValueError, match=msg):
- expr = "{0}(100)".format(fn)
+ expr = f"{fn}(100)"
self.eval(expr)
def test_binary_functions(self):
@@ -1760,7 +1756,7 @@ def test_binary_functions(self):
a = df.a
b = df.b
for fn in self.binary_fns:
- expr = "{0}(a, b)".format(fn)
+ expr = f"{fn}(a, b)"
got = self.eval(expr)
with np.errstate(all="ignore"):
expect = getattr(np, fn)(a, b)
@@ -1971,9 +1967,9 @@ def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
lhs = gen[lhs]() # noqa
rhs = gen[rhs]() # noqa
- ex1 = "lhs {0} mid {1} rhs".format(cmp, cmp)
- ex2 = "lhs {0} mid and mid {1} rhs".format(cmp, cmp)
- ex3 = "(lhs {0} mid) & (mid {1} rhs)".format(cmp, cmp)
+ ex1 = f"lhs {cmp} mid {cmp} rhs"
+ ex2 = f"lhs {cmp} mid and mid {cmp} rhs"
+ ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)"
for ex in (ex1, ex2, ex3):
with pytest.raises(NotImplementedError):
pd.eval(ex, engine=engine, parser=parser)
@@ -1990,7 +1986,7 @@ def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
)
def test_equals_various(other):
df = DataFrame({"A": ["a", "b", "c"]})
- result = df.eval("A == {}".format(other))
+ result = df.eval(f"A == {other}")
expected = Series([False, False, False], name="A")
if _USE_NUMEXPR:
# https://github.com/pandas-dev/pandas/issues/10239
diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py
index da2ef5260d070..37fa003668435 100644
--- a/pandas/tests/dtypes/cast/test_infer_dtype.py
+++ b/pandas/tests/dtypes/cast/test_infer_dtype.py
@@ -81,7 +81,7 @@ def test_infer_dtype_from_period(freq, pandas_dtype):
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=pandas_dtype)
if pandas_dtype:
- exp_dtype = "period[{0}]".format(freq)
+ exp_dtype = f"period[{freq}]"
exp_val = p.ordinal
else:
exp_dtype = np.object_
@@ -105,7 +105,7 @@ def test_infer_from_scalar_tz(tz, pandas_dtype):
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=pandas_dtype)
if pandas_dtype:
- exp_dtype = "datetime64[ns, {0}]".format(tz)
+ exp_dtype = f"datetime64[ns, {tz}]"
exp_val = dt.value
else:
exp_dtype = np.object_
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index bca78a1008d87..4dee6e3e92a7f 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -305,7 +305,7 @@ def test_dst(self):
@pytest.mark.parametrize("constructor", ["M8", "datetime64"])
def test_parser(self, tz, constructor):
# pr #11245
- dtz_str = "{con}[ns, {tz}]".format(con=constructor, tz=tz)
+ dtz_str = f"{constructor}[ns, {tz}]"
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype("ns", tz)
assert result == expected
@@ -635,7 +635,7 @@ def test_equality_generic(self, subtype):
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
- expected = "interval[{subtype}]".format(subtype=subtype)
+ expected = f"interval[{subtype}]"
assert str(dtype) == expected
assert dtype.name == "interval"
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 47ac4113d90ce..f34a6effcc4f5 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -1320,7 +1320,7 @@ def test_is_datetime_dtypes(self):
assert is_datetime64tz_dtype(tsa)
for tz in ["US/Eastern", "UTC"]:
- dtype = "datetime64[ns, {}]".format(tz)
+ dtype = f"datetime64[ns, {tz}]"
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
@@ -1414,7 +1414,7 @@ def test_is_scalar_pandas_containers(self):
def test_datetimeindex_from_empty_datetime64_array():
for unit in ["ms", "us", "ns"]:
- idx = DatetimeIndex(np.array([], dtype="datetime64[{unit}]".format(unit=unit)))
+ idx = DatetimeIndex(np.array([], dtype=f"datetime64[{unit}]"))
assert len(idx) == 0
diff --git a/pandas/tests/scalar/interval/test_ops.py b/pandas/tests/scalar/interval/test_ops.py
index f560c42617260..2d9f0954af5a8 100644
--- a/pandas/tests/scalar/interval/test_ops.py
+++ b/pandas/tests/scalar/interval/test_ops.py
@@ -59,8 +59,6 @@ def test_overlaps_endpoint(self, start_shift, closed, other_closed):
)
def test_overlaps_invalid_type(self, other):
interval = Interval(0, 1)
- msg = "`other` must be an Interval, got {other}".format(
- other=type(other).__name__
- )
+ msg = f"`other` must be an Interval, got {type(other).__name__}"
with pytest.raises(TypeError, match=msg):
interval.overlaps(other)
| updated to f-strings for:
pandas/tests/scalar/interval/test_ops.py
pandas/tests/computation/test_eval.py
pandas/tests/dtypes/*
pandas/tests/dtypes/cast/test_infer_dtype.py
pandas/tests/dtypes/test_dtypes.py
pandas/tests/dtypes/test_inference.py
held off on implementing for regex match strings pending PEP 536
- [x] contributes to fstring corrections in #29547
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/30442 | 2019-12-24T04:14:13Z | 2019-12-24T13:01:26Z | 2019-12-24T13:01:26Z | 2019-12-24T14:14:42Z |
TST: clean up skips and xfails | diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py
index be1a423c22aea..f01a030ad0e22 100644
--- a/pandas/tests/frame/methods/test_rank.py
+++ b/pandas/tests/frame/methods/test_rank.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import DataFrame, Series
import pandas.util.testing as tm
@@ -26,8 +28,10 @@ def method(self, request):
"""
return request.param
+ @td.skip_if_no_scipy
def test_rank(self, float_frame):
- rankdata = pytest.importorskip("scipy.stats.rankdata")
+ import scipy.stats # noqa:F401
+ from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
@@ -117,8 +121,10 @@ def test_rank_mixed_frame(self, float_string_frame):
expected = float_string_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
+ @td.skip_if_no_scipy
def test_rank_na_option(self, float_frame):
- rankdata = pytest.importorskip("scipy.stats.rankdata")
+ import scipy.stats # noqa:F401
+ from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
@@ -199,9 +205,10 @@ def test_rank_axis(self):
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index"))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns"))
+ @td.skip_if_no_scipy
def test_rank_methods_frame(self):
- pytest.importorskip("scipy.stats.special")
- rankdata = pytest.importorskip("scipy.stats.rankdata")
+ import scipy.stats # noqa:F401
+ from scipy.stats import rankdata
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 2f7ed3238b767..58ab44fba08cf 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -725,15 +725,10 @@ def test_constructor_with_int_tz(self, klass, box, tz, dtype):
expected = klass([ts])
assert result == expected
- # This is the desired future behavior
- # Note: this xfail is not strict because the test passes with
- # None or any of the UTC variants for tz_naive_fixture
- @pytest.mark.xfail(reason="Future behavior", strict=False)
- @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
- # GH 12619
- # TODO(GH-24559): Remove xfail
+ # GH 12619, GH#24559
tz = tz_naive_fixture
+
result = 1293858000000000000
expected = DatetimeIndex([result], tz=tz).asi8[0]
assert result == expected
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 6e919571d1423..f1c23d7b245c6 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -720,13 +720,11 @@ def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
+ @td.skip_if_no("psycopg2")
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
- try:
- import psycopg2
- except ImportError:
- pytest.skip("no psycopg2 installed")
+ import psycopg2
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index 31de40512c474..f053f690e1018 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -101,7 +101,7 @@ def test_nulls(idx):
idx.isna()
-@pytest.mark.xfail
+@pytest.mark.xfail(reason="isna is not defined for MultiIndex")
def test_hasnans_isnans(idx):
# GH 11343, added tests for hasnans / isnans
index = idx.copy()
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index ea128c8c3a422..12d834131f71b 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -55,6 +55,10 @@ def test_oo_optimizable():
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
+@pytest.mark.filterwarnings(
+ # patsy needs to update their imports
+ "ignore:Using or importing the ABCs from 'collections:DeprecationWarning"
+)
def test_statsmodels():
statsmodels = import_module("statsmodels") # noqa
diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py
index 2c65c9e2ac82c..7a6c64d9f9036 100644
--- a/pandas/tests/window/test_moments.py
+++ b/pandas/tests/window/test_moments.py
@@ -2150,6 +2150,7 @@ def test_rolling_corr_diff_length(self):
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
+ @td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(self, f):
# GH 7764
s = Series(range(4))
@@ -2157,16 +2158,11 @@ def test_rolling_functions_window_non_shrinkage(self, f):
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
- try:
- s_result = f(s)
- tm.assert_series_equal(s_result, s_expected)
-
- df_result = f(df)
- tm.assert_frame_equal(df_result, df_expected)
- except (ImportError):
+ s_result = f(s)
+ tm.assert_series_equal(s_result, s_expected)
- # scipy needed for rolling_window
- pytest.skip("scipy not available")
+ df_result = f(df)
+ tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30441 | 2019-12-24T02:51:27Z | 2019-12-24T20:41:18Z | 2019-12-24T20:41:18Z | 2019-12-24T20:41:51Z |
Replaced .format{}/% with f-strings in core/tools/datetime.py | diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 2d43623cec92c..f193865d90b71 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -475,8 +475,7 @@ def _adjust_to_origin(arg, origin, unit):
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise tslibs.OutOfBoundsDatetime(
- "{original} is Out of Bounds for "
- "origin='julian'".format(original=original)
+ f"{original} is Out of Bounds for origin='julian'"
)
else:
# arg must be numeric
@@ -485,27 +484,20 @@ def _adjust_to_origin(arg, origin, unit):
or is_numeric_dtype(np.asarray(arg))
):
raise ValueError(
- "'{arg}' is not compatible with origin='{origin}'; "
- "it must be numeric with a unit specified ".format(
- arg=arg, origin=origin
- )
+ f"'{arg}' is not compatible with origin='{origin}'; "
+ "it must be numeric with a unit specified"
)
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except tslibs.OutOfBoundsDatetime:
- raise tslibs.OutOfBoundsDatetime(
- "origin {origin} is Out of Bounds".format(origin=origin)
- )
+ raise tslibs.OutOfBoundsDatetime(f"origin {origin} is Out of Bounds")
except ValueError:
- raise ValueError(
- "origin {origin} cannot be converted "
- "to a Timestamp".format(origin=origin)
- )
+ raise ValueError(f"origin {origin} cannot be converted to a Timestamp")
if offset.tz is not None:
- raise ValueError("origin offset {} must be tz-naive".format(offset))
+ raise ValueError(f"origin offset {offset} must be tz-naive")
offset -= Timestamp(0)
# convert the offset to the unit of the arg
@@ -808,19 +800,19 @@ def f(value):
required = ["year", "month", "day"]
req = sorted(set(required) - set(unit_rev.keys()))
if len(req):
+ required = ",".join(req)
raise ValueError(
"to assemble mappings requires at least that "
- "[year, month, day] be specified: [{required}] "
- "is missing".format(required=",".join(req))
+ f"[year, month, day] be specified: [{required}] "
+ "is missing"
)
# keys we don't recognize
excess = sorted(set(unit_rev.keys()) - set(_unit_map.values()))
if len(excess):
+ excess = ",".join(excess)
raise ValueError(
- "extra keys have been passed "
- "to the datetime assemblage: "
- "[{excess}]".format(excess=",".join(excess))
+ f"extra keys have been passed to the datetime assemblage: [{excess}]"
)
def coerce(values):
@@ -983,9 +975,9 @@ def _convert_listlike(arg, format):
except (ValueError, TypeError):
if errors == "raise":
msg = (
- "Cannot convert {element} to a time with given "
- "format {format}"
- ).format(element=element, format=format)
+ f"Cannot convert {element} to a time with given "
+ f"format {format}"
+ )
raise ValueError(msg)
elif errors == "ignore":
return arg
@@ -1011,9 +1003,7 @@ def _convert_listlike(arg, format):
if time_object is not None:
times.append(time_object)
elif errors == "raise":
- raise ValueError(
- "Cannot convert arg {arg} to a time".format(arg=arg)
- )
+ raise ValueError(f"Cannot convert arg {arg} to a time")
elif errors == "ignore":
return arg
else:
| Modified to python3 format strings
ref: https://github.com/pandas-dev/pandas/issues/29547
- [x] contributes to fstring corrections in #29547
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
~~whatsnew entry~~ (will not add now. I think the full set of changes in #29547 should get a single update entry rather than a piecemeal one)
| https://api.github.com/repos/pandas-dev/pandas/pulls/30440 | 2019-12-24T02:25:18Z | 2019-12-24T13:39:42Z | 2019-12-24T13:39:42Z | 2019-12-24T13:39:46Z |
REF: de-duplicate/parametrize arithmetic tests | diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index c055b3e62a368..afce374aebe05 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1890,13 +1890,10 @@ def test_dti_addsub_int(self, tz_naive_fixture, one):
with pytest.raises(TypeError, match=msg):
rng + one
-
with pytest.raises(TypeError, match=msg):
rng += one
-
with pytest.raises(TypeError, match=msg):
rng - one
-
with pytest.raises(TypeError, match=msg):
rng -= one
@@ -1910,13 +1907,8 @@ def test_dti_add_intarray_tick(self, int_holder, freq):
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
- msg = "Addition/subtraction of integers"
-
- with pytest.raises(TypeError, match=msg):
- dti + other
-
- with pytest.raises(TypeError, match=msg):
- other + dti
+ msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
+ assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@@ -1925,29 +1917,18 @@ def test_dti_add_intarray_non_tick(self, int_holder, freq):
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
- msg = "Addition/subtraction of integers"
-
- with pytest.raises(TypeError, match=msg):
- dti + other
-
- with pytest.raises(TypeError, match=msg):
- other + dti
+ msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
+ assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
- tmsg = "cannot subtract DatetimeArray from"
- msg = "Addition/subtraction of integers"
- with pytest.raises(TypeError, match=msg):
- dti + other
- with pytest.raises(TypeError, match=msg):
- other + dti
- with pytest.raises(TypeError, match=msg):
- dti - other
- with pytest.raises(TypeError, match=tmsg):
- other - dti
+ msg = "|".join(
+ ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
+ )
+ assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index cafbbc9aef6f7..cc337f8fdd7ce 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -18,7 +18,11 @@
Timestamp,
timedelta_range,
)
-from pandas.tests.arithmetic.common import assert_invalid_comparison, get_upcast_box
+from pandas.tests.arithmetic.common import (
+ assert_invalid_addsub_type,
+ assert_invalid_comparison,
+ get_upcast_box,
+)
import pandas.util.testing as tm
# ------------------------------------------------------------------
@@ -470,6 +474,62 @@ def test_tda_add_sub_index(self):
expected = tdi - tdi
tm.assert_index_equal(result, expected)
+ # -------------------------------------------------------------
+ # Binary operations TimedeltaIndex and timedelta-like
+
+ def test_tdi_iadd_timedeltalike(self, two_hours):
+ # only test adding/sub offsets as + is now numeric
+ rng = timedelta_range("1 days", "10 days")
+ expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
+ rng += two_hours
+ tm.assert_index_equal(rng, expected)
+
+ def test_tdi_isub_timedeltalike(self, two_hours):
+ # only test adding/sub offsets as - is now numeric
+ rng = timedelta_range("1 days", "10 days")
+ expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
+ rng -= two_hours
+ tm.assert_index_equal(rng, expected)
+
+ # -------------------------------------------------------------
+
+ def test_tdi_ops_attributes(self):
+ rng = timedelta_range("2 days", periods=5, freq="2D", name="x")
+
+ result = rng + 1 * rng.freq
+ exp = timedelta_range("4 days", periods=5, freq="2D", name="x")
+ tm.assert_index_equal(result, exp)
+ assert result.freq == "2D"
+
+ result = rng - 2 * rng.freq
+ exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")
+ tm.assert_index_equal(result, exp)
+ assert result.freq == "2D"
+
+ result = rng * 2
+ exp = timedelta_range("4 days", periods=5, freq="4D", name="x")
+ tm.assert_index_equal(result, exp)
+ assert result.freq == "4D"
+
+ result = rng / 2
+ exp = timedelta_range("1 days", periods=5, freq="D", name="x")
+ tm.assert_index_equal(result, exp)
+ assert result.freq == "D"
+
+ result = -rng
+ exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")
+ tm.assert_index_equal(result, exp)
+ assert result.freq == "-2D"
+
+ rng = pd.timedelta_range("-2 days", periods=5, freq="D", name="x")
+
+ result = abs(rng)
+ exp = TimedeltaIndex(
+ ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"
+ )
+ tm.assert_index_equal(result, exp)
+ assert result.freq is None
+
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
@@ -541,22 +601,6 @@ def test_tdi_add_overflow(self):
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
- # TODO: moved from frame tests; needs parametrization/de-duplication
- def test_td64_df_add_int_frame(self):
- # GH#22696 Check that we don't dispatch to numpy implementation,
- # which treats int64 as m8[ns]
- tdi = pd.timedelta_range("1", periods=3)
- df = tdi.to_frame()
- other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
- with pytest.raises(TypeError):
- df + other
- with pytest.raises(TypeError):
- other + df
- with pytest.raises(TypeError):
- df - other
- with pytest.raises(TypeError):
- other - df
-
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
@@ -811,51 +855,6 @@ def test_timedelta64_ops_nat(self):
tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
- # -------------------------------------------------------------
- # Invalid Operations
-
- @pytest.mark.parametrize("other", ["a", 3.14, np.array([2.0, 3.0])])
- def test_td64arr_add_sub_invalid(self, box_with_array, other):
- # GH#13624 for str
- tdi = TimedeltaIndex(["1 day", "2 days"])
- tdarr = tm.box_expected(tdi, box_with_array)
-
- with pytest.raises(TypeError):
- tdarr + other
- with pytest.raises(TypeError):
- other + tdarr
- with pytest.raises(TypeError):
- tdarr - other
- with pytest.raises(TypeError):
- other - tdarr
-
- @pytest.mark.parametrize("freq", [None, "H"])
- def test_td64arr_sub_period(self, box_with_array, freq):
- # GH#13078
- # not supported, check TypeError
- p = pd.Period("2011-01-01", freq="D")
- idx = TimedeltaIndex(["1 hours", "2 hours"], freq=freq)
- idx = tm.box_expected(idx, box_with_array)
-
- with pytest.raises(TypeError):
- idx - p
-
- with pytest.raises(TypeError):
- p - idx
-
- @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
- @pytest.mark.parametrize("tdi_freq", [None, "H"])
- def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
- # GH#20049 subtracting PeriodIndex should raise TypeError
- tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
- dti = Timestamp("2018-03-07 17:16:40") + tdi
- pi = dti.to_period(pi_freq)
-
- # TODO: parametrize over box for pi?
- tdi = tm.box_expected(tdi, box_with_array)
- with pytest.raises(TypeError):
- tdi - pi
-
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@@ -962,127 +961,109 @@ def test_td64arr_add_datetime64_nat(self, box_with_array):
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
- # Operations with int-like others
+ # Invalid __add__/__sub__ operations
+
+ # TODO: moved from frame tests; needs parametrization/de-duplication
+ def test_td64_df_add_int_frame(self):
+ # GH#22696 Check that we don't dispatch to numpy implementation,
+ # which treats int64 as m8[ns]
+ tdi = pd.timedelta_range("1", periods=3)
+ df = tdi.to_frame()
+ other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
+ assert_invalid_addsub_type(df, other)
+
+ @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
+ @pytest.mark.parametrize("tdi_freq", [None, "H"])
+ def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
+ # GH#20049 subtracting PeriodIndex should raise TypeError
+ tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
+ dti = Timestamp("2018-03-07 17:16:40") + tdi
+ pi = dti.to_period(pi_freq)
+
+ # TODO: parametrize over box for pi?
+ tdi = tm.box_expected(tdi, box_with_array)
+ with pytest.raises(TypeError):
+ tdi - pi
+
+ # FIXME: don't leave commented-out
+ # FIXME: this raises with period scalar but not with PeriodIndex?
+ # with pytest.raises(TypeError):
+ # pi - tdi
+
+ # GH#13078 subtraction of Period scalar not supported
+ with pytest.raises(TypeError):
+ tdi - pi[0]
+ with pytest.raises(TypeError):
+ pi[0] - tdi
@pytest.mark.parametrize(
"other",
[
+ # GH#12624 for str case
+ "a",
# GH#19123
1,
- Series([20, 30, 40], dtype="uint8"),
- np.array([20, 30, 40], dtype="uint8"),
- pd.UInt64Index([20, 30, 40]),
- pd.Int64Index([20, 30, 40]),
- Series([2, 3, 4]),
1.5,
np.array(2),
],
)
- def test_td64arr_addsub_numeric_invalid(self, box_with_array, other):
- box = box_with_array
+ def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
+ # vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
- tdser = tm.box_expected(tdser, box)
+ tdarr = tm.box_expected(tdser, box_with_array)
- with pytest.raises(TypeError):
- tdser + other
- with pytest.raises(TypeError):
- other + tdser
- with pytest.raises(TypeError):
- tdser - other
- with pytest.raises(TypeError):
- other - tdser
+ assert_invalid_addsub_type(tdarr, other)
- @pytest.mark.parametrize(
- "dtype",
- [
- "int64",
- "int32",
- "int16",
- "uint64",
- "uint32",
- "uint16",
- "uint8",
- "float64",
- "float32",
- "float16",
- ],
- )
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
- Series([1, 2, 3])
- # TODO: Add DataFrame in here?
+ Series([1, 2, 3]),
+ DataFrame([[1, 2, 3]]),
],
ids=lambda x: type(x).__name__,
)
- def test_td64arr_add_sub_numeric_arr_invalid(self, box_with_array, vec, dtype):
- box = box_with_array
+ def test_td64arr_addsub_numeric_arr_invalid(
+ self, box_with_array, vec, any_real_dtype
+ ):
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
- tdser = tm.box_expected(tdser, box)
+ tdarr = tm.box_expected(tdser, box_with_array)
- vector = vec.astype(dtype)
- with pytest.raises(TypeError):
- tdser + vector
- with pytest.raises(TypeError):
- vector + tdser
- with pytest.raises(TypeError):
- tdser - vector
- with pytest.raises(TypeError):
- vector - tdser
+ vector = vec.astype(any_real_dtype)
+ assert_invalid_addsub_type(tdarr, vector)
- # TODO: parameterize over box and de-duplicate
- def test_tdi_add_sub_int(self, one):
+ def test_td64arr_add_sub_int(self, box_with_array, one):
# Variants of `one` for #19012, deprecated GH#22535
rng = timedelta_range("1 days 09:00:00", freq="H", periods=10)
+ tdarr = tm.box_expected(rng, box_with_array)
+
msg = "Addition/subtraction of integers"
+ assert_invalid_addsub_type(tdarr, one, msg)
+ # TOOD: get inplace ops into assert_invalid_addsub_type
with pytest.raises(TypeError, match=msg):
- rng + one
+ tdarr += one
with pytest.raises(TypeError, match=msg):
- rng += one
- with pytest.raises(TypeError, match=msg):
- rng - one
- with pytest.raises(TypeError, match=msg):
- rng -= one
+ tdarr -= one
- # TODO: parameterize over box and de-duplicate
- @pytest.mark.parametrize("box", [np.array, pd.Index])
- def test_tdi_add_sub_integer_array(self, box):
+ def test_td64arr_add_sub_integer_array(self, box_with_array):
# GH#19959, deprecated GH#22535
rng = timedelta_range("1 days 09:00:00", freq="H", periods=3)
- other = box([4, 3, 2])
- msg = "Addition/subtraction of integers and integer-arrays"
+ tdarr = tm.box_expected(rng, box_with_array)
+ other = tm.box_expected([4, 3, 2], box_with_array)
- with pytest.raises(TypeError, match=msg):
- rng + other
-
- with pytest.raises(TypeError, match=msg):
- other + rng
-
- with pytest.raises(TypeError, match=msg):
- rng - other
-
- with pytest.raises(TypeError, match=msg):
- other - rng
+ msg = "Addition/subtraction of integers and integer-arrays"
+ assert_invalid_addsub_type(tdarr, other, msg)
- # TODO: parameterize over box and de-duplicate
- @pytest.mark.parametrize("box", [np.array, pd.Index])
- def test_tdi_addsub_integer_array_no_freq(self, box):
+ def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# GH#19959
tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
- other = box([14, -1, 16])
- msg = "Addition/subtraction of integers"
+ tdarr = tm.box_expected(tdi, box_with_array)
+ other = tm.box_expected([14, -1, 16], box_with_array)
- with pytest.raises(TypeError, match=msg):
- tdi + other
- with pytest.raises(TypeError, match=msg):
- other + tdi
- with pytest.raises(TypeError, match=msg):
- tdi - other
- with pytest.raises(TypeError, match=msg):
- other - tdi
+ msg = "Addition/subtraction of integers"
+ assert_invalid_addsub_type(tdarr, other, msg)
# ------------------------------------------------------------------
# Operations with timedelta-like others
@@ -1913,7 +1894,6 @@ def test_td64arr_mul_td64arr_raises(self, box_with_array):
# ------------------------------------------------------------------
# Operations with numeric others
- @pytest.mark.parametrize("one", [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
@@ -1952,33 +1932,18 @@ def test_td64arr_div_numeric_scalar(self, box_with_array, two):
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
- @pytest.mark.parametrize(
- "dtype",
- [
- "int64",
- "int32",
- "int16",
- "uint64",
- "uint32",
- "uint16",
- "uint8",
- "float64",
- "float32",
- "float16",
- ],
- )
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
- def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
+ def test_td64arr_rmul_numeric_array(self, box_with_array, vector, any_real_dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
- vector = vector.astype(dtype)
+ vector = vector.astype(any_real_dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
@@ -1991,32 +1956,19 @@ def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
result = vector * tdser
tm.assert_equal(result, expected)
- @pytest.mark.parametrize(
- "dtype",
- [
- "int64",
- "int32",
- "int16",
- "uint64",
- "uint32",
- "uint16",
- "uint8",
- "float64",
- "float32",
- "float16",
- ],
- )
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
- def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
+ def test_td64arr_div_numeric_array(self, box_with_array, vector, any_real_dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
+
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
- vector = vector.astype(dtype)
+ vector = vector.astype(any_real_dtype)
+
expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
deleted file mode 100644
index 4f9f8341cb1e5..0000000000000
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ /dev/null
@@ -1,156 +0,0 @@
-from datetime import timedelta
-
-import numpy as np
-import pytest
-
-from pandas.errors import NullFrequencyError
-
-import pandas as pd
-from pandas import Timedelta, TimedeltaIndex, timedelta_range
-import pandas.util.testing as tm
-
-
-@pytest.fixture(
- params=[
- pd.offsets.Hour(2),
- timedelta(hours=2),
- np.timedelta64(2, "h"),
- Timedelta(hours=2),
- ],
- ids=str,
-)
-def delta(request):
- # Several ways of representing two hours
- return request.param
-
-
-@pytest.fixture(params=["B", "D"])
-def freq(request):
- return request.param
-
-
-class TestTimedeltaIndexArithmetic:
- # Addition and Subtraction Operations
-
- # -------------------------------------------------------------
- # TimedeltaIndex.shift is used by __add__/__sub__
-
- def test_tdi_shift_empty(self):
- # GH#9903
- idx = pd.TimedeltaIndex([], name="xxx")
- tm.assert_index_equal(idx.shift(0, freq="H"), idx)
- tm.assert_index_equal(idx.shift(3, freq="H"), idx)
-
- def test_tdi_shift_hours(self):
- # GH#9903
- idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(0, freq="H"), idx)
- exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(3, freq="H"), exp)
- exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
-
- def test_tdi_shift_minutes(self):
- # GH#9903
- idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(0, freq="T"), idx)
- exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
- tm.assert_index_equal(idx.shift(3, freq="T"), exp)
- exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
- tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
-
- def test_tdi_shift_int(self):
- # GH#8083
- trange = pd.to_timedelta(range(5), unit="d") + pd.offsets.Hour(1)
- result = trange.shift(1)
- expected = TimedeltaIndex(
- [
- "1 days 01:00:00",
- "2 days 01:00:00",
- "3 days 01:00:00",
- "4 days 01:00:00",
- "5 days 01:00:00",
- ],
- freq="D",
- )
- tm.assert_index_equal(result, expected)
-
- def test_tdi_shift_nonstandard_freq(self):
- # GH#8083
- trange = pd.to_timedelta(range(5), unit="d") + pd.offsets.Hour(1)
- result = trange.shift(3, freq="2D 1s")
- expected = TimedeltaIndex(
- [
- "6 days 01:00:03",
- "7 days 01:00:03",
- "8 days 01:00:03",
- "9 days 01:00:03",
- "10 days 01:00:03",
- ],
- freq="D",
- )
- tm.assert_index_equal(result, expected)
-
- def test_shift_no_freq(self):
- # GH#19147
- tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
- with pytest.raises(NullFrequencyError):
- tdi.shift(2)
-
- # -------------------------------------------------------------
- # Binary operations TimedeltaIndex and timedelta-like
- # Note: add and sub are tested in tests.test_arithmetic, in-place
- # tests are kept here because their behavior is Index-specific
-
- def test_tdi_iadd_timedeltalike(self, delta):
- # only test adding/sub offsets as + is now numeric
- rng = timedelta_range("1 days", "10 days")
- expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
- rng += delta
- tm.assert_index_equal(rng, expected)
-
- def test_tdi_isub_timedeltalike(self, delta):
- # only test adding/sub offsets as - is now numeric
- rng = timedelta_range("1 days", "10 days")
- expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
- rng -= delta
- tm.assert_index_equal(rng, expected)
-
- # -------------------------------------------------------------
-
- def test_tdi_ops_attributes(self):
- rng = timedelta_range("2 days", periods=5, freq="2D", name="x")
-
- result = rng + 1 * rng.freq
- exp = timedelta_range("4 days", periods=5, freq="2D", name="x")
- tm.assert_index_equal(result, exp)
- assert result.freq == "2D"
-
- result = rng - 2 * rng.freq
- exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")
- tm.assert_index_equal(result, exp)
- assert result.freq == "2D"
-
- result = rng * 2
- exp = timedelta_range("4 days", periods=5, freq="4D", name="x")
- tm.assert_index_equal(result, exp)
- assert result.freq == "4D"
-
- result = rng / 2
- exp = timedelta_range("1 days", periods=5, freq="D", name="x")
- tm.assert_index_equal(result, exp)
- assert result.freq == "D"
-
- result = -rng
- exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")
- tm.assert_index_equal(result, exp)
- assert result.freq == "-2D"
-
- rng = pd.timedelta_range("-2 days", periods=5, freq="D", name="x")
-
- result = abs(rng)
- exp = TimedeltaIndex(
- ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"
- )
- tm.assert_index_equal(result, exp)
- assert result.freq is None
diff --git a/pandas/tests/indexes/timedeltas/test_shift.py b/pandas/tests/indexes/timedeltas/test_shift.py
new file mode 100644
index 0000000000000..048b29c0da501
--- /dev/null
+++ b/pandas/tests/indexes/timedeltas/test_shift.py
@@ -0,0 +1,75 @@
+import pytest
+
+from pandas.errors import NullFrequencyError
+
+import pandas as pd
+from pandas import TimedeltaIndex
+import pandas.util.testing as tm
+
+
+class TestTimedeltaIndexShift:
+
+ # -------------------------------------------------------------
+ # TimedeltaIndex.shift is used by __add__/__sub__
+
+ def test_tdi_shift_empty(self):
+ # GH#9903
+ idx = pd.TimedeltaIndex([], name="xxx")
+ tm.assert_index_equal(idx.shift(0, freq="H"), idx)
+ tm.assert_index_equal(idx.shift(3, freq="H"), idx)
+
+ def test_tdi_shift_hours(self):
+ # GH#9903
+ idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
+ tm.assert_index_equal(idx.shift(0, freq="H"), idx)
+ exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
+ tm.assert_index_equal(idx.shift(3, freq="H"), exp)
+ exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
+ tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
+
+ def test_tdi_shift_minutes(self):
+ # GH#9903
+ idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
+ tm.assert_index_equal(idx.shift(0, freq="T"), idx)
+ exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
+ tm.assert_index_equal(idx.shift(3, freq="T"), exp)
+ exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
+ tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
+
+ def test_tdi_shift_int(self):
+ # GH#8083
+ trange = pd.to_timedelta(range(5), unit="d") + pd.offsets.Hour(1)
+ result = trange.shift(1)
+ expected = TimedeltaIndex(
+ [
+ "1 days 01:00:00",
+ "2 days 01:00:00",
+ "3 days 01:00:00",
+ "4 days 01:00:00",
+ "5 days 01:00:00",
+ ],
+ freq="D",
+ )
+ tm.assert_index_equal(result, expected)
+
+ def test_tdi_shift_nonstandard_freq(self):
+ # GH#8083
+ trange = pd.to_timedelta(range(5), unit="d") + pd.offsets.Hour(1)
+ result = trange.shift(3, freq="2D 1s")
+ expected = TimedeltaIndex(
+ [
+ "6 days 01:00:03",
+ "7 days 01:00:03",
+ "8 days 01:00:03",
+ "9 days 01:00:03",
+ "10 days 01:00:03",
+ ],
+ freq="D",
+ )
+ tm.assert_index_equal(result, expected)
+
+ def test_shift_no_freq(self):
+ # GH#19147
+ tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
+ with pytest.raises(NullFrequencyError):
+ tdi.shift(2)
| Use helper methods implemented in #30354.
Some of this is things made possible since we enforced the integer add/sub deprecation. | https://api.github.com/repos/pandas-dev/pandas/pulls/30438 | 2019-12-23T21:31:14Z | 2019-12-24T13:04:36Z | 2019-12-24T13:04:36Z | 2019-12-24T16:52:01Z |
REF: method-specific tests for cov, corr, corrwith, count, round | diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py
new file mode 100644
index 0000000000000..b5d3d60579f54
--- /dev/null
+++ b/pandas/tests/frame/methods/test_count.py
@@ -0,0 +1,36 @@
+from pandas import DataFrame, Series
+import pandas.util.testing as tm
+
+
+class TestDataFrameCount:
+ def test_count(self):
+ # corner case
+ frame = DataFrame()
+ ct1 = frame.count(1)
+ assert isinstance(ct1, Series)
+
+ ct2 = frame.count(0)
+ assert isinstance(ct2, Series)
+
+ # GH#423
+ df = DataFrame(index=range(10))
+ result = df.count(1)
+ expected = Series(0, index=df.index)
+ tm.assert_series_equal(result, expected)
+
+ df = DataFrame(columns=range(10))
+ result = df.count(0)
+ expected = Series(0, index=df.columns)
+ tm.assert_series_equal(result, expected)
+
+ df = DataFrame()
+ result = df.count()
+ expected = Series(0, index=[])
+ tm.assert_series_equal(result, expected)
+
+ def test_count_objects(self, float_string_frame):
+ dm = DataFrame(float_string_frame._series)
+ df = DataFrame(float_string_frame._series)
+
+ tm.assert_series_equal(dm.count(), df.count())
+ tm.assert_series_equal(dm.count(1), df.count(1))
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
new file mode 100644
index 0000000000000..04bc87a243a9b
--- /dev/null
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -0,0 +1,289 @@
+import warnings
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame, Series, isna
+import pandas.util.testing as tm
+
+
+class TestDataFrameCov:
+ def test_cov(self, float_frame, float_string_frame):
+ # min_periods no NAs (corner case)
+ expected = float_frame.cov()
+ result = float_frame.cov(min_periods=len(float_frame))
+
+ tm.assert_frame_equal(expected, result)
+
+ result = float_frame.cov(min_periods=len(float_frame) + 1)
+ assert isna(result.values).all()
+
+ # with NAs
+ frame = float_frame.copy()
+ frame["A"][:5] = np.nan
+ frame["B"][5:10] = np.nan
+ result = float_frame.cov(min_periods=len(float_frame) - 8)
+ expected = float_frame.cov()
+ expected.loc["A", "B"] = np.nan
+ expected.loc["B", "A"] = np.nan
+
+ # regular
+ float_frame["A"][:5] = np.nan
+ float_frame["B"][:10] = np.nan
+ cov = float_frame.cov()
+
+ tm.assert_almost_equal(cov["A"]["C"], float_frame["A"].cov(float_frame["C"]))
+
+ # exclude non-numeric types
+ result = float_string_frame.cov()
+ expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
+ tm.assert_frame_equal(result, expected)
+
+ # Single column frame
+ df = DataFrame(np.linspace(0.0, 1.0, 10))
+ result = df.cov()
+ expected = DataFrame(
+ np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
+ )
+ tm.assert_frame_equal(result, expected)
+ df.loc[0] = np.nan
+ result = df.cov()
+ expected = DataFrame(
+ np.cov(df.values[1:].T).reshape((1, 1)),
+ index=df.columns,
+ columns=df.columns,
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+class TestDataFrameCorr:
+ # DataFrame.corr(), as opposed to DataFrame.corrwith
+
+ @staticmethod
+ def _check_method(frame, method="pearson"):
+ correls = frame.corr(method=method)
+ expected = frame["A"].corr(frame["C"], method=method)
+ tm.assert_almost_equal(correls["A"]["C"], expected)
+
+ @td.skip_if_no_scipy
+ def test_corr_pearson(self, float_frame):
+ float_frame["A"][:5] = np.nan
+ float_frame["B"][5:10] = np.nan
+
+ self._check_method(float_frame, "pearson")
+
+ @td.skip_if_no_scipy
+ def test_corr_kendall(self, float_frame):
+ float_frame["A"][:5] = np.nan
+ float_frame["B"][5:10] = np.nan
+
+ self._check_method(float_frame, "kendall")
+
+ @td.skip_if_no_scipy
+ def test_corr_spearman(self, float_frame):
+ float_frame["A"][:5] = np.nan
+ float_frame["B"][5:10] = np.nan
+
+ self._check_method(float_frame, "spearman")
+
+ # ---------------------------------------------------------------------
+
+ @td.skip_if_no_scipy
+ def test_corr_non_numeric(self, float_frame, float_string_frame):
+ float_frame["A"][:5] = np.nan
+ float_frame["B"][5:10] = np.nan
+
+ # exclude non-numeric types
+ result = float_string_frame.corr()
+ expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no_scipy
+ @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
+ def test_corr_nooverlap(self, meth):
+ # nothing in common
+ df = DataFrame(
+ {
+ "A": [1, 1.5, 1, np.nan, np.nan, np.nan],
+ "B": [np.nan, np.nan, np.nan, 1, 1.5, 1],
+ "C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+ }
+ )
+ rs = df.corr(meth)
+ assert isna(rs.loc["A", "B"])
+ assert isna(rs.loc["B", "A"])
+ assert rs.loc["A", "A"] == 1
+ assert rs.loc["B", "B"] == 1
+ assert isna(rs.loc["C", "C"])
+
+ @td.skip_if_no_scipy
+ @pytest.mark.parametrize("meth", ["pearson", "spearman"])
+ def test_corr_constant(self, meth):
+ # constant --> all NA
+
+ df = DataFrame(
+ {
+ "A": [1, 1, 1, np.nan, np.nan, np.nan],
+ "B": [np.nan, np.nan, np.nan, 1, 1, 1],
+ }
+ )
+ rs = df.corr(meth)
+ assert isna(rs.values).all()
+
+ @td.skip_if_no_scipy
+ def test_corr_int_and_boolean(self):
+ # when dtypes of pandas series are different
+ # then ndarray will have dtype=object,
+ # so it need to be properly handled
+ df = DataFrame({"a": [True, False], "b": [1, 0]})
+
+ expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
+ for meth in ["pearson", "kendall", "spearman"]:
+
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("ignore", RuntimeWarning)
+ result = df.corr(meth)
+ tm.assert_frame_equal(result, expected)
+
+ def test_corr_cov_independent_index_column(self):
+ # GH#14617
+ df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd"))
+ for method in ["cov", "corr"]:
+ result = getattr(df, method)()
+ assert result.index is not result.columns
+ assert result.index.equals(result.columns)
+
+ def test_corr_invalid_method(self):
+ # GH#22298
+ df = pd.DataFrame(np.random.normal(size=(10, 2)))
+ msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
+ with pytest.raises(ValueError, match=msg):
+ df.corr(method="____")
+
+ def test_corr_int(self):
+ # dtypes other than float64 GH#1761
+ df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
+
+ df3.cov()
+ df3.corr()
+
+
+class TestDataFrameCorrWith:
+ def test_corrwith(self, datetime_frame):
+ a = datetime_frame
+ noise = Series(np.random.randn(len(a)), index=a.index)
+
+ b = datetime_frame.add(noise, axis=0)
+
+ # make sure order does not matter
+ b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
+ del b["B"]
+
+ colcorr = a.corrwith(b, axis=0)
+ tm.assert_almost_equal(colcorr["A"], a["A"].corr(b["A"]))
+
+ rowcorr = a.corrwith(b, axis=1)
+ tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
+
+ dropped = a.corrwith(b, axis=0, drop=True)
+ tm.assert_almost_equal(dropped["A"], a["A"].corr(b["A"]))
+ assert "B" not in dropped
+
+ dropped = a.corrwith(b, axis=1, drop=True)
+ assert a.index[-1] not in dropped.index
+
+ # non time-series data
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
+ df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
+ df2 = DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
+ correls = df1.corrwith(df2, axis=1)
+ for row in index[:4]:
+ tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row]))
+
+ def test_corrwith_with_objects(self):
+ df1 = tm.makeTimeDataFrame()
+ df2 = tm.makeTimeDataFrame()
+ cols = ["A", "B", "C", "D"]
+
+ df1["obj"] = "foo"
+ df2["obj"] = "bar"
+
+ result = df1.corrwith(df2)
+ expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
+ tm.assert_series_equal(result, expected)
+
+ result = df1.corrwith(df2, axis=1)
+ expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
+ tm.assert_series_equal(result, expected)
+
+ def test_corrwith_series(self, datetime_frame):
+ result = datetime_frame.corrwith(datetime_frame["A"])
+ expected = datetime_frame.apply(datetime_frame["A"].corr)
+
+ tm.assert_series_equal(result, expected)
+
+ def test_corrwith_matches_corrcoef(self):
+ df1 = DataFrame(np.arange(10000), columns=["a"])
+ df2 = DataFrame(np.arange(10000) ** 2, columns=["a"])
+ c1 = df1.corrwith(df2)["a"]
+ c2 = np.corrcoef(df1["a"], df2["a"])[0][1]
+
+ tm.assert_almost_equal(c1, c2)
+ assert c1 < 1
+
+ def test_corrwith_mixed_dtypes(self):
+ # GH#18570
+ df = pd.DataFrame(
+ {"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]}
+ )
+ s = pd.Series([0, 6, 7, 3])
+ result = df.corrwith(s)
+ corrs = [df["a"].corr(s), df["b"].corr(s)]
+ expected = pd.Series(data=corrs, index=["a", "b"])
+ tm.assert_series_equal(result, expected)
+
+ def test_corrwith_index_intersection(self):
+ df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
+ df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
+
+ result = df1.corrwith(df2, drop=True).index.sort_values()
+ expected = df1.columns.intersection(df2.columns).sort_values()
+ tm.assert_index_equal(result, expected)
+
+ def test_corrwith_index_union(self):
+ df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
+ df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
+
+ result = df1.corrwith(df2, drop=False).index.sort_values()
+ expected = df1.columns.union(df2.columns).sort_values()
+ tm.assert_index_equal(result, expected)
+
+ def test_corrwith_dup_cols(self):
+ # GH#21925
+ df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
+ df2 = df1.copy()
+ df2 = pd.concat((df2, df2[0]), axis=1)
+
+ result = df1.corrwith(df2)
+ expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
+ tm.assert_series_equal(result, expected)
+
+ @td.skip_if_no_scipy
+ def test_corrwith_spearman(self):
+ # GH#21925
+ df = pd.DataFrame(np.random.random(size=(100, 3)))
+ result = df.corrwith(df ** 2, method="spearman")
+ expected = Series(np.ones(len(result)))
+ tm.assert_series_equal(result, expected)
+
+ @td.skip_if_no_scipy
+ def test_corrwith_kendall(self):
+ # GH#21925
+ df = pd.DataFrame(np.random.random(size=(100, 3)))
+ result = df.corrwith(df ** 2, method="kendall")
+ expected = Series(np.ones(len(result)))
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py
new file mode 100644
index 0000000000000..96ac012ce7892
--- /dev/null
+++ b/pandas/tests/frame/methods/test_round.py
@@ -0,0 +1,217 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series, date_range
+import pandas.util.testing as tm
+
+
+class TestDataFrameRound:
+ def test_round(self):
+ # GH#2665
+
+ # Test that rounding an empty DataFrame does nothing
+ df = DataFrame()
+ tm.assert_frame_equal(df, df.round())
+
+ # Here's the test frame we'll be working with
+ df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
+
+ # Default round to integer (i.e. decimals=0)
+ expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]})
+ tm.assert_frame_equal(df.round(), expected_rounded)
+
+ # Round with an integer
+ decimals = 2
+ expected_rounded = DataFrame(
+ {"col1": [1.12, 2.12, 3.12], "col2": [1.23, 2.23, 3.23]}
+ )
+ tm.assert_frame_equal(df.round(decimals), expected_rounded)
+
+ # This should also work with np.round (since np.round dispatches to
+ # df.round)
+ tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
+
+ # Round with a list
+ round_list = [1, 2]
+ with pytest.raises(TypeError):
+ df.round(round_list)
+
+ # Round with a dictionary
+ expected_rounded = DataFrame(
+ {"col1": [1.1, 2.1, 3.1], "col2": [1.23, 2.23, 3.23]}
+ )
+ round_dict = {"col1": 1, "col2": 2}
+ tm.assert_frame_equal(df.round(round_dict), expected_rounded)
+
+ # Incomplete dict
+ expected_partially_rounded = DataFrame(
+ {"col1": [1.123, 2.123, 3.123], "col2": [1.2, 2.2, 3.2]}
+ )
+ partial_round_dict = {"col2": 1}
+ tm.assert_frame_equal(df.round(partial_round_dict), expected_partially_rounded)
+
+ # Dict with unknown elements
+ wrong_round_dict = {"col3": 2, "col2": 1}
+ tm.assert_frame_equal(df.round(wrong_round_dict), expected_partially_rounded)
+
+ # float input to `decimals`
+ non_int_round_dict = {"col1": 1, "col2": 0.5}
+ with pytest.raises(TypeError):
+ df.round(non_int_round_dict)
+
+ # String input
+ non_int_round_dict = {"col1": 1, "col2": "foo"}
+ with pytest.raises(TypeError):
+ df.round(non_int_round_dict)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with pytest.raises(TypeError):
+ df.round(non_int_round_Series)
+
+ # List input
+ non_int_round_dict = {"col1": 1, "col2": [1, 2]}
+ with pytest.raises(TypeError):
+ df.round(non_int_round_dict)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with pytest.raises(TypeError):
+ df.round(non_int_round_Series)
+
+ # Non integer Series inputs
+ non_int_round_Series = Series(non_int_round_dict)
+ with pytest.raises(TypeError):
+ df.round(non_int_round_Series)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with pytest.raises(TypeError):
+ df.round(non_int_round_Series)
+
+ # Negative numbers
+ negative_round_dict = {"col1": -1, "col2": -2}
+ big_df = df * 100
+ expected_neg_rounded = DataFrame(
+ {"col1": [110.0, 210, 310], "col2": [100.0, 200, 300]}
+ )
+ tm.assert_frame_equal(big_df.round(negative_round_dict), expected_neg_rounded)
+
+ # nan in Series round
+ nan_round_Series = Series({"col1": np.nan, "col2": 1})
+
+ # TODO(wesm): unused?
+ expected_nan_round = DataFrame( # noqa
+ {"col1": [1.123, 2.123, 3.123], "col2": [1.2, 2.2, 3.2]}
+ )
+
+ with pytest.raises(TypeError):
+ df.round(nan_round_Series)
+
+ # Make sure this doesn't break existing Series.round
+ tm.assert_series_equal(df["col1"].round(1), expected_rounded["col1"])
+
+ # named columns
+ # GH#11986
+ decimals = 2
+ expected_rounded = DataFrame(
+ {"col1": [1.12, 2.12, 3.12], "col2": [1.23, 2.23, 3.23]}
+ )
+ df.columns.name = "cols"
+ expected_rounded.columns.name = "cols"
+ tm.assert_frame_equal(df.round(decimals), expected_rounded)
+
+ # interaction of named columns & series
+ tm.assert_series_equal(df["col1"].round(decimals), expected_rounded["col1"])
+ tm.assert_series_equal(df.round(decimals)["col1"], expected_rounded["col1"])
+
+ def test_round_numpy(self):
+ # GH#12600
+ df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
+ out = np.round(df, decimals=0)
+ expected = DataFrame([[2.0, 1.0], [0.0, 7.0]])
+ tm.assert_frame_equal(out, expected)
+
+ msg = "the 'out' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ np.round(df, decimals=0, out=df)
+
+ def test_round_numpy_with_nan(self):
+ # See GH#14197
+ df = Series([1.53, np.nan, 0.06]).to_frame()
+ with tm.assert_produces_warning(None):
+ result = df.round()
+ expected = Series([2.0, np.nan, 0.0]).to_frame()
+ tm.assert_frame_equal(result, expected)
+
+ def test_round_mixed_type(self):
+ # GH#11885
+ df = DataFrame(
+ {
+ "col1": [1.1, 2.2, 3.3, 4.4],
+ "col2": ["1", "a", "c", "f"],
+ "col3": date_range("20111111", periods=4),
+ }
+ )
+ round_0 = DataFrame(
+ {
+ "col1": [1.0, 2.0, 3.0, 4.0],
+ "col2": ["1", "a", "c", "f"],
+ "col3": date_range("20111111", periods=4),
+ }
+ )
+ tm.assert_frame_equal(df.round(), round_0)
+ tm.assert_frame_equal(df.round(1), df)
+ tm.assert_frame_equal(df.round({"col1": 1}), df)
+ tm.assert_frame_equal(df.round({"col1": 0}), round_0)
+ tm.assert_frame_equal(df.round({"col1": 0, "col2": 1}), round_0)
+ tm.assert_frame_equal(df.round({"col3": 1}), df)
+
+ def test_round_with_duplicate_columns(self):
+ # GH#11611
+
+ df = pd.DataFrame(
+ np.random.random([3, 3]),
+ columns=["A", "B", "C"],
+ index=["first", "second", "third"],
+ )
+
+ dfs = pd.concat((df, df), axis=1)
+ rounded = dfs.round()
+ tm.assert_index_equal(rounded.index, dfs.index)
+
+ decimals = pd.Series([1, 0, 2], index=["A", "B", "A"])
+ msg = "Index of decimals must be unique"
+ with pytest.raises(ValueError, match=msg):
+ df.round(decimals)
+
+ def test_round_builtin(self):
+ # GH#11763
+ # Here's the test frame we'll be working with
+ df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
+
+ # Default round to integer (i.e. decimals=0)
+ expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]})
+ tm.assert_frame_equal(round(df), expected_rounded)
+
+ def test_round_nonunique_categorical(self):
+ # See GH#21809
+ idx = pd.CategoricalIndex(["low"] * 3 + ["hi"] * 3)
+ df = pd.DataFrame(np.random.rand(6, 3), columns=list("abc"))
+
+ expected = df.round(3)
+ expected.index = idx
+
+ df_categorical = df.copy().set_index(idx)
+ assert df_categorical.shape == (6, 3)
+ result = df_categorical.round(3)
+ assert result.shape == (6, 3)
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_round_interval_category_columns(self):
+ # GH#30063
+ columns = pd.CategoricalIndex(pd.interval_range(0, 2))
+ df = DataFrame([[0.66, 1.1], [0.3, 0.25]], columns=columns)
+
+ result = df.round()
+ expected = DataFrame([[1.0, 1.0], [0.0, 0.0]], columns=columns)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 1a241cd72ec43..9ddb14470f6e4 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1,7 +1,6 @@
from datetime import timedelta
from decimal import Decimal
import operator
-import warnings
import numpy as np
import pytest
@@ -259,277 +258,6 @@ def assert_bool_op_api(
class TestDataFrameAnalytics:
- # ---------------------------------------------------------------------
- # Correlation and covariance
-
- @td.skip_if_no_scipy
- def test_corr_pearson(self, float_frame):
- float_frame["A"][:5] = np.nan
- float_frame["B"][5:10] = np.nan
-
- self._check_method(float_frame, "pearson")
-
- @td.skip_if_no_scipy
- def test_corr_kendall(self, float_frame):
- float_frame["A"][:5] = np.nan
- float_frame["B"][5:10] = np.nan
-
- self._check_method(float_frame, "kendall")
-
- @td.skip_if_no_scipy
- def test_corr_spearman(self, float_frame):
- float_frame["A"][:5] = np.nan
- float_frame["B"][5:10] = np.nan
-
- self._check_method(float_frame, "spearman")
-
- def _check_method(self, frame, method="pearson"):
- correls = frame.corr(method=method)
- expected = frame["A"].corr(frame["C"], method=method)
- tm.assert_almost_equal(correls["A"]["C"], expected)
-
- @td.skip_if_no_scipy
- def test_corr_non_numeric(self, float_frame, float_string_frame):
- float_frame["A"][:5] = np.nan
- float_frame["B"][5:10] = np.nan
-
- # exclude non-numeric types
- result = float_string_frame.corr()
- expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no_scipy
- @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
- def test_corr_nooverlap(self, meth):
- # nothing in common
- df = DataFrame(
- {
- "A": [1, 1.5, 1, np.nan, np.nan, np.nan],
- "B": [np.nan, np.nan, np.nan, 1, 1.5, 1],
- "C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
- }
- )
- rs = df.corr(meth)
- assert isna(rs.loc["A", "B"])
- assert isna(rs.loc["B", "A"])
- assert rs.loc["A", "A"] == 1
- assert rs.loc["B", "B"] == 1
- assert isna(rs.loc["C", "C"])
-
- @td.skip_if_no_scipy
- @pytest.mark.parametrize("meth", ["pearson", "spearman"])
- def test_corr_constant(self, meth):
- # constant --> all NA
-
- df = DataFrame(
- {
- "A": [1, 1, 1, np.nan, np.nan, np.nan],
- "B": [np.nan, np.nan, np.nan, 1, 1, 1],
- }
- )
- rs = df.corr(meth)
- assert isna(rs.values).all()
-
- def test_corr_int(self):
- # dtypes other than float64 #1761
- df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
-
- df3.cov()
- df3.corr()
-
- @td.skip_if_no_scipy
- def test_corr_int_and_boolean(self):
- # when dtypes of pandas series are different
- # then ndarray will have dtype=object,
- # so it need to be properly handled
- df = DataFrame({"a": [True, False], "b": [1, 0]})
-
- expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
- for meth in ["pearson", "kendall", "spearman"]:
-
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- result = df.corr(meth)
- tm.assert_frame_equal(result, expected)
-
- def test_corr_cov_independent_index_column(self):
- # GH 14617
- df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd"))
- for method in ["cov", "corr"]:
- result = getattr(df, method)()
- assert result.index is not result.columns
- assert result.index.equals(result.columns)
-
- def test_corr_invalid_method(self):
- # GH 22298
- df = pd.DataFrame(np.random.normal(size=(10, 2)))
- msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
- with pytest.raises(ValueError, match=msg):
- df.corr(method="____")
-
- def test_cov(self, float_frame, float_string_frame):
- # min_periods no NAs (corner case)
- expected = float_frame.cov()
- result = float_frame.cov(min_periods=len(float_frame))
-
- tm.assert_frame_equal(expected, result)
-
- result = float_frame.cov(min_periods=len(float_frame) + 1)
- assert isna(result.values).all()
-
- # with NAs
- frame = float_frame.copy()
- frame["A"][:5] = np.nan
- frame["B"][5:10] = np.nan
- result = float_frame.cov(min_periods=len(float_frame) - 8)
- expected = float_frame.cov()
- expected.loc["A", "B"] = np.nan
- expected.loc["B", "A"] = np.nan
-
- # regular
- float_frame["A"][:5] = np.nan
- float_frame["B"][:10] = np.nan
- cov = float_frame.cov()
-
- tm.assert_almost_equal(cov["A"]["C"], float_frame["A"].cov(float_frame["C"]))
-
- # exclude non-numeric types
- result = float_string_frame.cov()
- expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
- tm.assert_frame_equal(result, expected)
-
- # Single column frame
- df = DataFrame(np.linspace(0.0, 1.0, 10))
- result = df.cov()
- expected = DataFrame(
- np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
- )
- tm.assert_frame_equal(result, expected)
- df.loc[0] = np.nan
- result = df.cov()
- expected = DataFrame(
- np.cov(df.values[1:].T).reshape((1, 1)),
- index=df.columns,
- columns=df.columns,
- )
- tm.assert_frame_equal(result, expected)
-
- def test_corrwith(self, datetime_frame):
- a = datetime_frame
- noise = Series(np.random.randn(len(a)), index=a.index)
-
- b = datetime_frame.add(noise, axis=0)
-
- # make sure order does not matter
- b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
- del b["B"]
-
- colcorr = a.corrwith(b, axis=0)
- tm.assert_almost_equal(colcorr["A"], a["A"].corr(b["A"]))
-
- rowcorr = a.corrwith(b, axis=1)
- tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
-
- dropped = a.corrwith(b, axis=0, drop=True)
- tm.assert_almost_equal(dropped["A"], a["A"].corr(b["A"]))
- assert "B" not in dropped
-
- dropped = a.corrwith(b, axis=1, drop=True)
- assert a.index[-1] not in dropped.index
-
- # non time-series data
- index = ["a", "b", "c", "d", "e"]
- columns = ["one", "two", "three", "four"]
- df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
- df2 = DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
- correls = df1.corrwith(df2, axis=1)
- for row in index[:4]:
- tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row]))
-
- def test_corrwith_with_objects(self):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame()
- cols = ["A", "B", "C", "D"]
-
- df1["obj"] = "foo"
- df2["obj"] = "bar"
-
- result = df1.corrwith(df2)
- expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
- tm.assert_series_equal(result, expected)
-
- result = df1.corrwith(df2, axis=1)
- expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
- tm.assert_series_equal(result, expected)
-
- def test_corrwith_series(self, datetime_frame):
- result = datetime_frame.corrwith(datetime_frame["A"])
- expected = datetime_frame.apply(datetime_frame["A"].corr)
-
- tm.assert_series_equal(result, expected)
-
- def test_corrwith_matches_corrcoef(self):
- df1 = DataFrame(np.arange(10000), columns=["a"])
- df2 = DataFrame(np.arange(10000) ** 2, columns=["a"])
- c1 = df1.corrwith(df2)["a"]
- c2 = np.corrcoef(df1["a"], df2["a"])[0][1]
-
- tm.assert_almost_equal(c1, c2)
- assert c1 < 1
-
- def test_corrwith_mixed_dtypes(self):
- # GH 18570
- df = pd.DataFrame(
- {"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]}
- )
- s = pd.Series([0, 6, 7, 3])
- result = df.corrwith(s)
- corrs = [df["a"].corr(s), df["b"].corr(s)]
- expected = pd.Series(data=corrs, index=["a", "b"])
- tm.assert_series_equal(result, expected)
-
- def test_corrwith_index_intersection(self):
- df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
- df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
-
- result = df1.corrwith(df2, drop=True).index.sort_values()
- expected = df1.columns.intersection(df2.columns).sort_values()
- tm.assert_index_equal(result, expected)
-
- def test_corrwith_index_union(self):
- df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
- df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
-
- result = df1.corrwith(df2, drop=False).index.sort_values()
- expected = df1.columns.union(df2.columns).sort_values()
- tm.assert_index_equal(result, expected)
-
- def test_corrwith_dup_cols(self):
- # GH 21925
- df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
- df2 = df1.copy()
- df2 = pd.concat((df2, df2[0]), axis=1)
-
- result = df1.corrwith(df2)
- expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
- tm.assert_series_equal(result, expected)
-
- @td.skip_if_no_scipy
- def test_corrwith_spearman(self):
- # GH 21925
- df = pd.DataFrame(np.random.random(size=(100, 3)))
- result = df.corrwith(df ** 2, method="spearman")
- expected = Series(np.ones(len(result)))
- tm.assert_series_equal(result, expected)
-
- @td.skip_if_no_scipy
- def test_corrwith_kendall(self):
- # GH 21925
- df = pd.DataFrame(np.random.random(size=(100, 3)))
- result = df.corrwith(df ** 2, method="kendall")
- expected = Series(np.ones(len(result)))
- tm.assert_series_equal(result, expected)
-
# ---------------------------------------------------------------------
# Reductions
@@ -1168,38 +896,6 @@ def test_sum_bools(self):
# ---------------------------------------------------------------------
# Miscellanea
- def test_count(self):
- # corner case
- frame = DataFrame()
- ct1 = frame.count(1)
- assert isinstance(ct1, Series)
-
- ct2 = frame.count(0)
- assert isinstance(ct2, Series)
-
- # GH#423
- df = DataFrame(index=range(10))
- result = df.count(1)
- expected = Series(0, index=df.index)
- tm.assert_series_equal(result, expected)
-
- df = DataFrame(columns=range(10))
- result = df.count(0)
- expected = Series(0, index=df.columns)
- tm.assert_series_equal(result, expected)
-
- df = DataFrame()
- result = df.count()
- expected = Series(0, index=[])
- tm.assert_series_equal(result, expected)
-
- def test_count_objects(self, float_string_frame):
- dm = DataFrame(float_string_frame._series)
- df = DataFrame(float_string_frame._series)
-
- tm.assert_series_equal(dm.count(), df.count())
- tm.assert_series_equal(dm.count(1), df.count(1))
-
def test_pct_change(self):
# GH#11150
pnl = DataFrame(
@@ -1452,218 +1148,6 @@ def test_any_all_level_axis_none_raises(self, method):
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level="out")
- # ---------------------------------------------------------------------
- # Rounding
-
- def test_round(self):
- # GH 2665
-
- # Test that rounding an empty DataFrame does nothing
- df = DataFrame()
- tm.assert_frame_equal(df, df.round())
-
- # Here's the test frame we'll be working with
- df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
-
- # Default round to integer (i.e. decimals=0)
- expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]})
- tm.assert_frame_equal(df.round(), expected_rounded)
-
- # Round with an integer
- decimals = 2
- expected_rounded = DataFrame(
- {"col1": [1.12, 2.12, 3.12], "col2": [1.23, 2.23, 3.23]}
- )
- tm.assert_frame_equal(df.round(decimals), expected_rounded)
-
- # This should also work with np.round (since np.round dispatches to
- # df.round)
- tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
-
- # Round with a list
- round_list = [1, 2]
- with pytest.raises(TypeError):
- df.round(round_list)
-
- # Round with a dictionary
- expected_rounded = DataFrame(
- {"col1": [1.1, 2.1, 3.1], "col2": [1.23, 2.23, 3.23]}
- )
- round_dict = {"col1": 1, "col2": 2}
- tm.assert_frame_equal(df.round(round_dict), expected_rounded)
-
- # Incomplete dict
- expected_partially_rounded = DataFrame(
- {"col1": [1.123, 2.123, 3.123], "col2": [1.2, 2.2, 3.2]}
- )
- partial_round_dict = {"col2": 1}
- tm.assert_frame_equal(df.round(partial_round_dict), expected_partially_rounded)
-
- # Dict with unknown elements
- wrong_round_dict = {"col3": 2, "col2": 1}
- tm.assert_frame_equal(df.round(wrong_round_dict), expected_partially_rounded)
-
- # float input to `decimals`
- non_int_round_dict = {"col1": 1, "col2": 0.5}
- with pytest.raises(TypeError):
- df.round(non_int_round_dict)
-
- # String input
- non_int_round_dict = {"col1": 1, "col2": "foo"}
- with pytest.raises(TypeError):
- df.round(non_int_round_dict)
-
- non_int_round_Series = Series(non_int_round_dict)
- with pytest.raises(TypeError):
- df.round(non_int_round_Series)
-
- # List input
- non_int_round_dict = {"col1": 1, "col2": [1, 2]}
- with pytest.raises(TypeError):
- df.round(non_int_round_dict)
-
- non_int_round_Series = Series(non_int_round_dict)
- with pytest.raises(TypeError):
- df.round(non_int_round_Series)
-
- # Non integer Series inputs
- non_int_round_Series = Series(non_int_round_dict)
- with pytest.raises(TypeError):
- df.round(non_int_round_Series)
-
- non_int_round_Series = Series(non_int_round_dict)
- with pytest.raises(TypeError):
- df.round(non_int_round_Series)
-
- # Negative numbers
- negative_round_dict = {"col1": -1, "col2": -2}
- big_df = df * 100
- expected_neg_rounded = DataFrame(
- {"col1": [110.0, 210, 310], "col2": [100.0, 200, 300]}
- )
- tm.assert_frame_equal(big_df.round(negative_round_dict), expected_neg_rounded)
-
- # nan in Series round
- nan_round_Series = Series({"col1": np.nan, "col2": 1})
-
- # TODO(wesm): unused?
- expected_nan_round = DataFrame( # noqa
- {"col1": [1.123, 2.123, 3.123], "col2": [1.2, 2.2, 3.2]}
- )
-
- with pytest.raises(TypeError):
- df.round(nan_round_Series)
-
- # Make sure this doesn't break existing Series.round
- tm.assert_series_equal(df["col1"].round(1), expected_rounded["col1"])
-
- # named columns
- # GH 11986
- decimals = 2
- expected_rounded = DataFrame(
- {"col1": [1.12, 2.12, 3.12], "col2": [1.23, 2.23, 3.23]}
- )
- df.columns.name = "cols"
- expected_rounded.columns.name = "cols"
- tm.assert_frame_equal(df.round(decimals), expected_rounded)
-
- # interaction of named columns & series
- tm.assert_series_equal(df["col1"].round(decimals), expected_rounded["col1"])
- tm.assert_series_equal(df.round(decimals)["col1"], expected_rounded["col1"])
-
- def test_numpy_round(self):
- # GH 12600
- df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
- out = np.round(df, decimals=0)
- expected = DataFrame([[2.0, 1.0], [0.0, 7.0]])
- tm.assert_frame_equal(out, expected)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.round(df, decimals=0, out=df)
-
- def test_numpy_round_nan(self):
- # See gh-14197
- df = Series([1.53, np.nan, 0.06]).to_frame()
- with tm.assert_produces_warning(None):
- result = df.round()
- expected = Series([2.0, np.nan, 0.0]).to_frame()
- tm.assert_frame_equal(result, expected)
-
- def test_round_mixed_type(self):
- # GH 11885
- df = DataFrame(
- {
- "col1": [1.1, 2.2, 3.3, 4.4],
- "col2": ["1", "a", "c", "f"],
- "col3": date_range("20111111", periods=4),
- }
- )
- round_0 = DataFrame(
- {
- "col1": [1.0, 2.0, 3.0, 4.0],
- "col2": ["1", "a", "c", "f"],
- "col3": date_range("20111111", periods=4),
- }
- )
- tm.assert_frame_equal(df.round(), round_0)
- tm.assert_frame_equal(df.round(1), df)
- tm.assert_frame_equal(df.round({"col1": 1}), df)
- tm.assert_frame_equal(df.round({"col1": 0}), round_0)
- tm.assert_frame_equal(df.round({"col1": 0, "col2": 1}), round_0)
- tm.assert_frame_equal(df.round({"col3": 1}), df)
-
- def test_round_issue(self):
- # GH 11611
-
- df = pd.DataFrame(
- np.random.random([3, 3]),
- columns=["A", "B", "C"],
- index=["first", "second", "third"],
- )
-
- dfs = pd.concat((df, df), axis=1)
- rounded = dfs.round()
- tm.assert_index_equal(rounded.index, dfs.index)
-
- decimals = pd.Series([1, 0, 2], index=["A", "B", "A"])
- msg = "Index of decimals must be unique"
- with pytest.raises(ValueError, match=msg):
- df.round(decimals)
-
- def test_built_in_round(self):
- # GH 11763
- # Here's the test frame we'll be working with
- df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
-
- # Default round to integer (i.e. decimals=0)
- expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]})
- tm.assert_frame_equal(round(df), expected_rounded)
-
- def test_round_nonunique_categorical(self):
- # See GH21809
- idx = pd.CategoricalIndex(["low"] * 3 + ["hi"] * 3)
- df = pd.DataFrame(np.random.rand(6, 3), columns=list("abc"))
-
- expected = df.round(3)
- expected.index = idx
-
- df_categorical = df.copy().set_index(idx)
- assert df_categorical.shape == (6, 3)
- result = df_categorical.round(3)
- assert result.shape == (6, 3)
-
- tm.assert_frame_equal(result, expected)
-
- def test_round_interval_category_columns(self):
- # GH 30063
- columns = pd.CategoricalIndex(pd.interval_range(0, 2))
- df = DataFrame([[0.66, 1.1], [0.3, 0.25]], columns=columns)
-
- result = df.round()
- expected = DataFrame([[1.0, 1.0], [0.0, 0.0]], columns=columns)
- tm.assert_frame_equal(result, expected)
-
# ---------------------------------------------------------------------
# Matrix-like
diff --git a/pandas/tests/series/methods/test_count.py b/pandas/tests/series/methods/test_count.py
new file mode 100644
index 0000000000000..9cf776c0d9f1a
--- /dev/null
+++ b/pandas/tests/series/methods/test_count.py
@@ -0,0 +1,38 @@
+import numpy as np
+
+import pandas as pd
+from pandas import Categorical, MultiIndex, Series
+import pandas.util.testing as tm
+
+
+class TestSeriesCount:
+ def test_count(self, datetime_series):
+ assert datetime_series.count() == len(datetime_series)
+
+ datetime_series[::2] = np.NaN
+
+ assert datetime_series.count() == np.isfinite(datetime_series).sum()
+
+ mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, np.nan, 1, 2]])
+ ts = Series(np.arange(len(mi)), index=mi)
+
+ left = ts.count(level=1)
+ right = Series([2, 3, 1], index=[1, 2, np.nan])
+ tm.assert_series_equal(left, right)
+
+ ts.iloc[[0, 3, 5]] = np.nan
+ tm.assert_series_equal(ts.count(level=1), right - 1)
+
+ # GH#29478
+ with pd.option_context("use_inf_as_na", True):
+ assert pd.Series([pd.Timestamp("1990/1/1")]).count() == 1
+
+ def test_count_categorical(self):
+
+ ser = Series(
+ Categorical(
+ [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
+ )
+ )
+ result = ser.count()
+ assert result == 2
diff --git a/pandas/tests/series/methods/test_cov_corr.py b/pandas/tests/series/methods/test_cov_corr.py
new file mode 100644
index 0000000000000..f7dae87018419
--- /dev/null
+++ b/pandas/tests/series/methods/test_cov_corr.py
@@ -0,0 +1,158 @@
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import Series, isna
+import pandas.util.testing as tm
+
+
+class TestSeriesCov:
+ def test_cov(self, datetime_series):
+ # full overlap
+ tm.assert_almost_equal(
+ datetime_series.cov(datetime_series), datetime_series.std() ** 2
+ )
+
+ # partial overlap
+ tm.assert_almost_equal(
+ datetime_series[:15].cov(datetime_series[5:]),
+ datetime_series[5:15].std() ** 2,
+ )
+
+ # No overlap
+ assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
+
+ # all NA
+ cp = datetime_series[:10].copy()
+ cp[:] = np.nan
+ assert isna(cp.cov(cp))
+
+ # min_periods
+ assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))
+
+ ts1 = datetime_series[:15].reindex(datetime_series.index)
+ ts2 = datetime_series[5:].reindex(datetime_series.index)
+ assert isna(ts1.cov(ts2, min_periods=12))
+
+
+class TestSeriesCorr:
+ @td.skip_if_no_scipy
+ def test_corr(self, datetime_series):
+ import scipy.stats as stats
+
+ # full overlap
+ tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
+
+ # partial overlap
+ tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)
+
+ assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))
+
+ ts1 = datetime_series[:15].reindex(datetime_series.index)
+ ts2 = datetime_series[5:].reindex(datetime_series.index)
+ assert isna(ts1.corr(ts2, min_periods=12))
+
+ # No overlap
+ assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
+
+ # all NA
+ cp = datetime_series[:10].copy()
+ cp[:] = np.nan
+ assert isna(cp.corr(cp))
+
+ A = tm.makeTimeSeries()
+ B = tm.makeTimeSeries()
+ result = A.corr(B)
+ expected, _ = stats.pearsonr(A, B)
+ tm.assert_almost_equal(result, expected)
+
+ @td.skip_if_no_scipy
+ def test_corr_rank(self):
+ import scipy.stats as stats
+
+ # kendall and spearman
+ A = tm.makeTimeSeries()
+ B = tm.makeTimeSeries()
+ A[-5:] = A[:5]
+ result = A.corr(B, method="kendall")
+ expected = stats.kendalltau(A, B)[0]
+ tm.assert_almost_equal(result, expected)
+
+ result = A.corr(B, method="spearman")
+ expected = stats.spearmanr(A, B)[0]
+ tm.assert_almost_equal(result, expected)
+
+ # results from R
+ A = Series(
+ [
+ -0.89926396,
+ 0.94209606,
+ -1.03289164,
+ -0.95445587,
+ 0.76910310,
+ -0.06430576,
+ -2.09704447,
+ 0.40660407,
+ -0.89926396,
+ 0.94209606,
+ ]
+ )
+ B = Series(
+ [
+ -1.01270225,
+ -0.62210117,
+ -1.56895827,
+ 0.59592943,
+ -0.01680292,
+ 1.17258718,
+ -1.06009347,
+ -0.10222060,
+ -0.89076239,
+ 0.89372375,
+ ]
+ )
+ kexp = 0.4319297
+ sexp = 0.5853767
+ tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)
+ tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)
+
+ def test_corr_invalid_method(self):
+ # GH PR #22298
+ s1 = pd.Series(np.random.randn(10))
+ s2 = pd.Series(np.random.randn(10))
+ msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
+ with pytest.raises(ValueError, match=msg):
+ s1.corr(s2, method="____")
+
+ def test_corr_callable_method(self, datetime_series):
+ # simple correlation example
+ # returns 1 if exact equality, 0 otherwise
+ my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0
+
+ # simple example
+ s1 = Series([1, 2, 3, 4, 5])
+ s2 = Series([5, 4, 3, 2, 1])
+ expected = 0
+ tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
+
+ # full overlap
+ tm.assert_almost_equal(
+ datetime_series.corr(datetime_series, method=my_corr), 1.0
+ )
+
+ # partial overlap
+ tm.assert_almost_equal(
+ datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0
+ )
+
+ # No overlap
+ assert np.isnan(
+ datetime_series[::2].corr(datetime_series[1::2], method=my_corr)
+ )
+
+ # dataframe example
+ df = pd.DataFrame([s1, s2])
+ expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])
+ tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py
new file mode 100644
index 0000000000000..1776468ef5a83
--- /dev/null
+++ b/pandas/tests/series/methods/test_round.py
@@ -0,0 +1,46 @@
+import numpy as np
+import pytest
+
+from pandas import Series
+import pandas.util.testing as tm
+
+
+class TestSeriesRound:
+ def test_round(self, datetime_series):
+ datetime_series.index.name = "index_name"
+ result = datetime_series.round(2)
+ expected = Series(
+ np.round(datetime_series.values, 2), index=datetime_series.index, name="ts"
+ )
+ tm.assert_series_equal(result, expected)
+ assert result.name == datetime_series.name
+
+ def test_round_numpy(self):
+ # See GH#12600
+ ser = Series([1.53, 1.36, 0.06])
+ out = np.round(ser, decimals=0)
+ expected = Series([2.0, 1.0, 0.0])
+ tm.assert_series_equal(out, expected)
+
+ msg = "the 'out' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ np.round(ser, decimals=0, out=ser)
+
+ def test_round_numpy_with_nan(self):
+ # See GH#14197
+ ser = Series([1.53, np.nan, 0.06])
+ with tm.assert_produces_warning(None):
+ result = ser.round()
+ expected = Series([2.0, np.nan, 0.0])
+ tm.assert_series_equal(result, expected)
+
+ def test_round_builtin(self):
+ ser = Series([1.123, 2.123, 3.123], index=range(3))
+ result = round(ser)
+ expected_rounded0 = Series([1.0, 2.0, 3.0], index=range(3))
+ tm.assert_series_equal(result, expected_rounded0)
+
+ decimals = 2
+ expected_rounded = Series([1.12, 2.12, 3.12], index=range(3))
+ result = round(ser, decimals)
+ tm.assert_series_equal(result, expected_rounded)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index b43dcc5e52c55..17cf307a04d7f 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -89,218 +89,12 @@ def test_numpy_compress(self):
with pytest.raises(ValueError, match=msg):
np.compress(cond, s, out=s)
- def test_round(self, datetime_series):
- datetime_series.index.name = "index_name"
- result = datetime_series.round(2)
- expected = Series(
- np.round(datetime_series.values, 2), index=datetime_series.index, name="ts"
- )
- tm.assert_series_equal(result, expected)
- assert result.name == datetime_series.name
-
- def test_numpy_round(self):
- # See gh-12600
- s = Series([1.53, 1.36, 0.06])
- out = np.round(s, decimals=0)
- expected = Series([2.0, 1.0, 0.0])
- tm.assert_series_equal(out, expected)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.round(s, decimals=0, out=s)
-
- def test_numpy_round_nan(self):
- # See gh-14197
- s = Series([1.53, np.nan, 0.06])
- with tm.assert_produces_warning(None):
- result = s.round()
- expected = Series([2.0, np.nan, 0.0])
- tm.assert_series_equal(result, expected)
-
- def test_built_in_round(self):
- s = Series([1.123, 2.123, 3.123], index=range(3))
- result = round(s)
- expected_rounded0 = Series([1.0, 2.0, 3.0], index=range(3))
- tm.assert_series_equal(result, expected_rounded0)
-
- decimals = 2
- expected_rounded = Series([1.12, 2.12, 3.12], index=range(3))
- result = round(s, decimals)
- tm.assert_series_equal(result, expected_rounded)
-
def test_prod_numpy16_bug(self):
s = Series([1.0, 1.0, 1.0], index=range(3))
result = s.prod()
assert not isinstance(result, Series)
- @td.skip_if_no_scipy
- def test_corr(self, datetime_series):
- import scipy.stats as stats
-
- # full overlap
- tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
-
- # partial overlap
- tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)
-
- assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))
-
- ts1 = datetime_series[:15].reindex(datetime_series.index)
- ts2 = datetime_series[5:].reindex(datetime_series.index)
- assert isna(ts1.corr(ts2, min_periods=12))
-
- # No overlap
- assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
-
- # all NA
- cp = datetime_series[:10].copy()
- cp[:] = np.nan
- assert isna(cp.corr(cp))
-
- A = tm.makeTimeSeries()
- B = tm.makeTimeSeries()
- result = A.corr(B)
- expected, _ = stats.pearsonr(A, B)
- tm.assert_almost_equal(result, expected)
-
- @td.skip_if_no_scipy
- def test_corr_rank(self):
- import scipy.stats as stats
-
- # kendall and spearman
- A = tm.makeTimeSeries()
- B = tm.makeTimeSeries()
- A[-5:] = A[:5]
- result = A.corr(B, method="kendall")
- expected = stats.kendalltau(A, B)[0]
- tm.assert_almost_equal(result, expected)
-
- result = A.corr(B, method="spearman")
- expected = stats.spearmanr(A, B)[0]
- tm.assert_almost_equal(result, expected)
-
- # results from R
- A = Series(
- [
- -0.89926396,
- 0.94209606,
- -1.03289164,
- -0.95445587,
- 0.76910310,
- -0.06430576,
- -2.09704447,
- 0.40660407,
- -0.89926396,
- 0.94209606,
- ]
- )
- B = Series(
- [
- -1.01270225,
- -0.62210117,
- -1.56895827,
- 0.59592943,
- -0.01680292,
- 1.17258718,
- -1.06009347,
- -0.10222060,
- -0.89076239,
- 0.89372375,
- ]
- )
- kexp = 0.4319297
- sexp = 0.5853767
- tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)
- tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)
-
- def test_corr_invalid_method(self):
- # GH PR #22298
- s1 = pd.Series(np.random.randn(10))
- s2 = pd.Series(np.random.randn(10))
- msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
- with pytest.raises(ValueError, match=msg):
- s1.corr(s2, method="____")
-
- def test_corr_callable_method(self, datetime_series):
- # simple correlation example
- # returns 1 if exact equality, 0 otherwise
- my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0
-
- # simple example
- s1 = Series([1, 2, 3, 4, 5])
- s2 = Series([5, 4, 3, 2, 1])
- expected = 0
- tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
-
- # full overlap
- tm.assert_almost_equal(
- datetime_series.corr(datetime_series, method=my_corr), 1.0
- )
-
- # partial overlap
- tm.assert_almost_equal(
- datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0
- )
-
- # No overlap
- assert np.isnan(
- datetime_series[::2].corr(datetime_series[1::2], method=my_corr)
- )
-
- # dataframe example
- df = pd.DataFrame([s1, s2])
- expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])
- tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
-
- def test_cov(self, datetime_series):
- # full overlap
- tm.assert_almost_equal(
- datetime_series.cov(datetime_series), datetime_series.std() ** 2
- )
-
- # partial overlap
- tm.assert_almost_equal(
- datetime_series[:15].cov(datetime_series[5:]),
- datetime_series[5:15].std() ** 2,
- )
-
- # No overlap
- assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
-
- # all NA
- cp = datetime_series[:10].copy()
- cp[:] = np.nan
- assert isna(cp.cov(cp))
-
- # min_periods
- assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))
-
- ts1 = datetime_series[:15].reindex(datetime_series.index)
- ts2 = datetime_series[5:].reindex(datetime_series.index)
- assert isna(ts1.cov(ts2, min_periods=12))
-
- def test_count(self, datetime_series):
- assert datetime_series.count() == len(datetime_series)
-
- datetime_series[::2] = np.NaN
-
- assert datetime_series.count() == np.isfinite(datetime_series).sum()
-
- mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, np.nan, 1, 2]])
- ts = Series(np.arange(len(mi)), index=mi)
-
- left = ts.count(level=1)
- right = Series([2, 3, 1], index=[1, 2, np.nan])
- tm.assert_series_equal(left, right)
-
- ts.iloc[[0, 3, 5]] = np.nan
- tm.assert_series_equal(ts.count(level=1), right - 1)
-
- # GH29478
- with pd.option_context("use_inf_as_na", True):
- assert pd.Series([pd.Timestamp("1990/1/1")]).count() == 1
-
def test_dot(self):
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
b = DataFrame(
@@ -606,16 +400,6 @@ def test_validate_stat_keepdims(self):
class TestCategoricalSeriesAnalytics:
- def test_count(self):
-
- s = Series(
- Categorical(
- [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
- )
- )
- result = s.count()
- assert result == 2
-
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"],
| in the process of organizing these, some parametrization opportunities become clear. Didnt do them in this PR to keep this just a moving-around diff | https://api.github.com/repos/pandas-dev/pandas/pulls/30437 | 2019-12-23T21:25:24Z | 2019-12-24T13:05:54Z | 2019-12-24T13:05:54Z | 2019-12-24T17:02:30Z |
ENH: Allow scatter plot to plot objects and datetime type data | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index a6ba7770dadcc..510e19c2f3ef0 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -848,6 +848,7 @@ Plotting
- :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`)
- :meth:`DataFrame.plot` now allow a ``backend`` keyword argument to allow changing between backends in one session (:issue:`28619`).
- Bug in color validation incorrectly raising for non-color styles (:issue:`29122`).
+- Allow :meth:`DataFrame.plot.scatter` to plot ``object`` and ``datetime`` type data (:issue:`18755`, :issue:`30391`)
- Bug in :meth:`DataFrame.hist`, ``xrot=0`` does not work with ``by`` and subplots (:issue:`30288`).
Groupby/resample/rolling
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 6da13f188357c..609da140a3f0b 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -395,6 +395,10 @@ def _compute_plot_data(self):
include_type = [np.number]
exclude_type = ["timedelta"]
+ # GH 18755, include object and category type for scatter plot
+ if self._kind == "scatter":
+ include_type.extend(["object", "category"])
+
numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type)
try:
@@ -866,10 +870,13 @@ def __init__(self, data, x, y, **kwargs):
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
- if len(self.data[x]._get_numeric_data()) == 0:
- raise ValueError(self._kind + " requires x column to be numeric")
- if len(self.data[y]._get_numeric_data()) == 0:
- raise ValueError(self._kind + " requires y column to be numeric")
+
+ # Scatter plot allows to plot objects data
+ if self._kind == "hexbin":
+ if len(self.data[x]._get_numeric_data()) == 0:
+ raise ValueError(self._kind + " requires x column to be numeric")
+ if len(self.data[y]._get_numeric_data()) == 0:
+ raise ValueError(self._kind + " requires y column to be numeric")
self.x = x
self.y = y
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 4fcdc350bc90a..a9ab9d84dbc2f 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -1162,6 +1162,27 @@ def test_plot_scatter(self):
axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+ def test_scatterplot_datetime_data(self):
+ # GH 30391
+ dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
+ vals = np.random.normal(0, 1, len(dates))
+ df = pd.DataFrame({"dates": dates, "vals": vals})
+
+ _check_plot_works(df.plot.scatter, x="dates", y="vals")
+ _check_plot_works(df.plot.scatter, x=0, y=1)
+
+ def test_scatterplot_object_data(self):
+ # GH 18755
+ df = pd.DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))
+
+ _check_plot_works(df.plot.scatter, x="a", y="b")
+ _check_plot_works(df.plot.scatter, x=0, y=1)
+
+ df = pd.DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))
+
+ _check_plot_works(df.plot.scatter, x="a", y="b")
+ _check_plot_works(df.plot.scatter, x=0, y=1)
+
@pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
# addressing issue #10611, to ensure colobar does not
@@ -1216,24 +1237,15 @@ def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
+ @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
@pytest.mark.slow
- def test_plot_scatter_with_categorical_data(self):
- # GH 16199
+ def test_plot_scatter_with_categorical_data(self, x, y):
+ # after fixing GH 18755, should be able to plot categorical data
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}
)
- with pytest.raises(ValueError) as ve:
- df.plot(x="x", y="y", kind="scatter")
- ve.match("requires y column to be numeric")
-
- with pytest.raises(ValueError) as ve:
- df.plot(x="y", y="x", kind="scatter")
- ve.match("requires x column to be numeric")
-
- with pytest.raises(ValueError) as ve:
- df.plot(x="y", y="y", kind="scatter")
- ve.match("requires x column to be numeric")
+ _check_plot_works(df.plot.scatter, x=x, y=y)
@pytest.mark.slow
def test_plot_scatter_with_c(self):
| closes #18755
closes #30391
xref #8113
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Because this is a plot PR, so I put the figures gotten by the two examples in those two PRs in this description to facilitate seeing output, and more cases are tested in test files.

| https://api.github.com/repos/pandas-dev/pandas/pulls/30434 | 2019-12-23T18:21:38Z | 2020-01-01T03:39:08Z | 2020-01-01T03:39:08Z | 2020-01-01T03:39:12Z |
REF: more method-specific test files | diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py
new file mode 100644
index 0000000000000..4f311bbaa8eb9
--- /dev/null
+++ b/pandas/tests/frame/methods/test_sort_index.py
@@ -0,0 +1,231 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import CategoricalDtype, DataFrame, IntervalIndex, MultiIndex, Series
+import pandas.util.testing as tm
+
+
+class TestDataFrameSortIndex:
+ def test_sort_index_nan(self):
+ # GH#3917
+
+ # Test DataFrame with nan label
+ df = DataFrame(
+ {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
+ index=[1, 2, 3, 4, 5, 6, np.nan],
+ )
+
+ # NaN label, ascending=True, na_position='last'
+ sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
+ expected = DataFrame(
+ {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
+ index=[1, 2, 3, 4, 5, 6, np.nan],
+ )
+ tm.assert_frame_equal(sorted_df, expected)
+
+ # NaN label, ascending=True, na_position='first'
+ sorted_df = df.sort_index(na_position="first")
+ expected = DataFrame(
+ {"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
+ index=[np.nan, 1, 2, 3, 4, 5, 6],
+ )
+ tm.assert_frame_equal(sorted_df, expected)
+
+ # NaN label, ascending=False, na_position='last'
+ sorted_df = df.sort_index(kind="quicksort", ascending=False)
+ expected = DataFrame(
+ {"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
+ index=[6, 5, 4, 3, 2, 1, np.nan],
+ )
+ tm.assert_frame_equal(sorted_df, expected)
+
+ # NaN label, ascending=False, na_position='first'
+ sorted_df = df.sort_index(
+ kind="quicksort", ascending=False, na_position="first"
+ )
+ expected = DataFrame(
+ {"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
+ index=[np.nan, 6, 5, 4, 3, 2, 1],
+ )
+ tm.assert_frame_equal(sorted_df, expected)
+
+ def test_sort_index_multi_index(self):
+ # GH#25775, testing that sorting by index works with a multi-index.
+ df = DataFrame(
+ {"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
+ )
+ result = df.set_index(list("abc")).sort_index(level=list("ba"))
+
+ expected = DataFrame(
+ {"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
+ )
+ expected = expected.set_index(list("abc"))
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_index_inplace(self):
+ frame = DataFrame(
+ np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
+ )
+
+ # axis=0
+ unordered = frame.loc[[3, 2, 4, 1]]
+ a_id = id(unordered["A"])
+ df = unordered.copy()
+ df.sort_index(inplace=True)
+ expected = frame
+ tm.assert_frame_equal(df, expected)
+ assert a_id != id(df["A"])
+
+ df = unordered.copy()
+ df.sort_index(ascending=False, inplace=True)
+ expected = frame[::-1]
+ tm.assert_frame_equal(df, expected)
+
+ # axis=1
+ unordered = frame.loc[:, ["D", "B", "C", "A"]]
+ df = unordered.copy()
+ df.sort_index(axis=1, inplace=True)
+ expected = frame
+ tm.assert_frame_equal(df, expected)
+
+ df = unordered.copy()
+ df.sort_index(axis=1, ascending=False, inplace=True)
+ expected = frame.iloc[:, ::-1]
+ tm.assert_frame_equal(df, expected)
+
+ def test_sort_index_different_sortorder(self):
+ A = np.arange(20).repeat(5)
+ B = np.tile(np.arange(5), 20)
+
+ indexer = np.random.permutation(100)
+ A = A.take(indexer)
+ B = B.take(indexer)
+
+ df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
+
+ ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
+ expected = df.take(ex_indexer)
+
+ # test with multiindex, too
+ idf = df.set_index(["A", "B"])
+
+ result = idf.sort_index(ascending=[1, 0])
+ expected = idf.take(ex_indexer)
+ tm.assert_frame_equal(result, expected)
+
+ # also, Series!
+ result = idf["C"].sort_index(ascending=[1, 0])
+ tm.assert_series_equal(result, expected["C"])
+
+ def test_sort_index_level(self):
+ mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
+ df = DataFrame([[1, 2], [3, 4]], mi)
+
+ result = df.sort_index(level="A", sort_remaining=False)
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
+ result = df.sort_index(level=["A", "B"], sort_remaining=False)
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
+ # Error thrown by sort_index when
+ # first index is sorted last (GH#26053)
+ result = df.sort_index(level=["C", "B", "A"])
+ expected = df.iloc[[1, 0]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.sort_index(level=["B", "C", "A"])
+ expected = df.iloc[[1, 0]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.sort_index(level=["C", "A"])
+ expected = df.iloc[[1, 0]]
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_index_categorical_index(self):
+
+ df = DataFrame(
+ {
+ "A": np.arange(6, dtype="int64"),
+ "B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
+ }
+ ).set_index("B")
+
+ result = df.sort_index()
+ expected = df.iloc[[4, 0, 1, 5, 2, 3]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.sort_index(ascending=False)
+ expected = df.iloc[[2, 3, 0, 1, 5, 4]]
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_index(self):
+ # GH#13496
+
+ frame = DataFrame(
+ np.arange(16).reshape(4, 4),
+ index=[1, 2, 3, 4],
+ columns=["A", "B", "C", "D"],
+ )
+
+ # axis=0 : sort rows by index labels
+ unordered = frame.loc[[3, 2, 4, 1]]
+ result = unordered.sort_index(axis=0)
+ expected = frame
+ tm.assert_frame_equal(result, expected)
+
+ result = unordered.sort_index(ascending=False)
+ expected = frame[::-1]
+ tm.assert_frame_equal(result, expected)
+
+ # axis=1 : sort columns by column names
+ unordered = frame.iloc[:, [2, 1, 3, 0]]
+ result = unordered.sort_index(axis=1)
+ tm.assert_frame_equal(result, frame)
+
+ result = unordered.sort_index(axis=1, ascending=False)
+ expected = frame.iloc[:, ::-1]
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("level", ["A", 0]) # GH#21052
+ def test_sort_index_multiindex(self, level):
+ # GH#13496
+
+ # sort rows by specified level of multi-index
+ mi = MultiIndex.from_tuples(
+ [[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
+ )
+ df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
+
+ expected_mi = MultiIndex.from_tuples(
+ [[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
+ )
+ expected = pd.DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
+ result = df.sort_index(level=level)
+ tm.assert_frame_equal(result, expected)
+
+ # sort_remaining=False
+ expected_mi = MultiIndex.from_tuples(
+ [[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
+ )
+ expected = pd.DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
+ result = df.sort_index(level=level, sort_remaining=False)
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_index_intervalindex(self):
+ # this is a de-facto sort via unstack
+ # confirming that we sort in the order of the bins
+ y = Series(np.random.randn(100))
+ x1 = Series(np.sign(np.random.randn(100)))
+ x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
+ model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
+
+ result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
+ expected = IntervalIndex.from_tuples(
+ [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
+ )
+ result = result.columns.levels[1].categories
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/methods/test_sort_values.py
similarity index 59%
rename from pandas/tests/frame/test_sorting.py
rename to pandas/tests/frame/methods/test_sort_values.py
index 64294d5cdcb81..540bed452d9e9 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -4,21 +4,11 @@
import pytest
import pandas as pd
-from pandas import (
- Categorical,
- DataFrame,
- IntervalIndex,
- MultiIndex,
- NaT,
- Series,
- Timestamp,
- date_range,
-)
-from pandas.api.types import CategoricalDtype
+from pandas import Categorical, DataFrame, NaT, Timestamp, date_range
import pandas.util.testing as tm
-class TestDataFrameSorting:
+class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
@@ -57,7 +47,7 @@ def test_sort_values(self):
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
- # by row (axis=1): GH 10806
+ # by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
@@ -106,21 +96,69 @@ def test_sort_values_inplace(self):
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
- def test_sort_nan(self):
- # GH3917
- nan = np.nan
- df = DataFrame({"A": [1, 2, nan, 1, 6, 8, 4], "B": [9, nan, 5, 2, 5, 4, 5]})
+ def test_sort_values_multicolumn(self):
+ A = np.arange(5).repeat(20)
+ B = np.tile(np.arange(5), 20)
+ random.shuffle(A)
+ random.shuffle(B)
+ frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
+
+ result = frame.sort_values(by=["A", "B"])
+ indexer = np.lexsort((frame["B"], frame["A"]))
+ expected = frame.take(indexer)
+ tm.assert_frame_equal(result, expected)
+
+ result = frame.sort_values(by=["A", "B"], ascending=False)
+ indexer = np.lexsort(
+ (frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
+ )
+ expected = frame.take(indexer)
+ tm.assert_frame_equal(result, expected)
+
+ result = frame.sort_values(by=["B", "A"])
+ indexer = np.lexsort((frame["A"], frame["B"]))
+ expected = frame.take(indexer)
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_values_multicolumn_uint64(self):
+ # GH#9918
+ # uint64 multicolumn sort
+
+ df = pd.DataFrame(
+ {
+ "a": pd.Series([18446637057563306014, 1162265347240853609]),
+ "b": pd.Series([1, 2]),
+ }
+ )
+ df["a"] = df["a"].astype(np.uint64)
+ result = df.sort_values(["a", "b"])
+
+ expected = pd.DataFrame(
+ {
+ "a": pd.Series([18446637057563306014, 1162265347240853609]),
+ "b": pd.Series([1, 2]),
+ },
+ index=pd.Index([1, 0]),
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_sort_values_nan(self):
+ # GH#3917
+ df = DataFrame(
+ {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
+ )
# sort one column only
expected = DataFrame(
- {"A": [nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, nan, 5, 5, 4]},
+ {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
- {"A": [nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, nan, 9, 2]},
+ {"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
@@ -132,7 +170,7 @@ def test_sort_nan(self):
# na_position='last', order
expected = DataFrame(
- {"A": [1, 1, 2, 4, 6, 8, nan], "B": [2, 9, nan, 5, 5, 4, 5]},
+ {"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
@@ -140,7 +178,7 @@ def test_sort_nan(self):
# na_position='first', order
expected = DataFrame(
- {"A": [nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, nan, 5, 5, 4]},
+ {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
@@ -148,7 +186,7 @@ def test_sort_nan(self):
# na_position='first', not order
expected = DataFrame(
- {"A": [nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, nan, 5, 5, 4]},
+ {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
@@ -156,54 +194,14 @@ def test_sort_nan(self):
# na_position='last', not order
expected = DataFrame(
- {"A": [8, 6, 4, 2, 1, 1, nan], "B": [4, 5, 5, nan, 2, 9, 5]},
+ {"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
- # Test DataFrame with nan label
- df = DataFrame(
- {"A": [1, 2, nan, 1, 6, 8, 4], "B": [9, nan, 5, 2, 5, 4, 5]},
- index=[1, 2, 3, 4, 5, 6, nan],
- )
-
- # NaN label, ascending=True, na_position='last'
- sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
- expected = DataFrame(
- {"A": [1, 2, nan, 1, 6, 8, 4], "B": [9, nan, 5, 2, 5, 4, 5]},
- index=[1, 2, 3, 4, 5, 6, nan],
- )
- tm.assert_frame_equal(sorted_df, expected)
-
- # NaN label, ascending=True, na_position='first'
- sorted_df = df.sort_index(na_position="first")
- expected = DataFrame(
- {"A": [4, 1, 2, nan, 1, 6, 8], "B": [5, 9, nan, 5, 2, 5, 4]},
- index=[nan, 1, 2, 3, 4, 5, 6],
- )
- tm.assert_frame_equal(sorted_df, expected)
-
- # NaN label, ascending=False, na_position='last'
- sorted_df = df.sort_index(kind="quicksort", ascending=False)
- expected = DataFrame(
- {"A": [8, 6, 1, nan, 2, 1, 4], "B": [4, 5, 2, 5, nan, 9, 5]},
- index=[6, 5, 4, 3, 2, 1, nan],
- )
- tm.assert_frame_equal(sorted_df, expected)
-
- # NaN label, ascending=False, na_position='first'
- sorted_df = df.sort_index(
- kind="quicksort", ascending=False, na_position="first"
- )
- expected = DataFrame(
- {"A": [4, 8, 6, 1, nan, 2, 1], "B": [5, 4, 5, 2, 5, nan, 9]},
- index=[nan, 6, 5, 4, 3, 2, 1],
- )
- tm.assert_frame_equal(sorted_df, expected)
-
- def test_stable_descending_sort(self):
- # GH #6399
+ def test_sort_values_stable_descending_sort(self):
+ # GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
@@ -211,12 +209,13 @@ def test_stable_descending_sort(self):
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
- def test_stable_descending_multicolumn_sort(self):
- nan = np.nan
- df = DataFrame({"A": [1, 2, nan, 1, 6, 8, 4], "B": [9, nan, 5, 2, 5, 4, 5]})
+ def test_sort_values_stable_descending_multicolumn_sort(self):
+ df = DataFrame(
+ {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
+ )
# test stable mergesort
expected = DataFrame(
- {"A": [nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, nan, 2, 9]},
+ {"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0],
)
sorted_df = df.sort_values(
@@ -225,7 +224,7 @@ def test_stable_descending_multicolumn_sort(self):
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
- {"A": [nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, nan, 9, 2]},
+ {"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(
@@ -233,30 +232,16 @@ def test_stable_descending_multicolumn_sort(self):
)
tm.assert_frame_equal(sorted_df, expected)
- def test_sort_multi_index(self):
- # GH 25775, testing that sorting by index works with a multi-index.
- df = DataFrame(
- {"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
- )
- result = df.set_index(list("abc")).sort_index(level=list("ba"))
-
- expected = DataFrame(
- {"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
- )
- expected = expected.set_index(list("abc"))
-
- tm.assert_frame_equal(result, expected)
-
- def test_stable_categorial(self):
- # GH 16793
+ def test_sort_values_stable_categorial(self):
+ # GH#16793
df = DataFrame({"x": pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
- def test_sort_datetimes(self):
+ def test_sort_values_datetimes(self):
- # GH 3461, argsort / lexsort differences for a datetime column
+ # GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
@@ -293,7 +278,7 @@ def test_sort_datetimes(self):
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
- def test_frame_column_inplace_sort_exception(self, float_frame):
+ def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
@@ -301,9 +286,9 @@ def test_frame_column_inplace_sort_exception(self, float_frame):
cp = s.copy()
cp.sort_values() # it works!
- def test_sort_nat_values_in_int_column(self):
+ def test_sort_values_nat_values_in_int_column(self):
- # GH 14922: "sorting with large float and multiple columns incorrect"
+ # GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
@@ -356,220 +341,8 @@ def test_sort_nat_values_in_int_column(self):
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
- def test_sort_nat(self):
-
- # GH 16836
-
- d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
- d2 = [
- Timestamp(x)
- for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
- ]
- df = pd.DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
-
- d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
- d4 = [
- Timestamp(x)
- for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
- ]
- expected = pd.DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
- sorted_df = df.sort_values(by=["a", "b"])
- tm.assert_frame_equal(sorted_df, expected)
-
-
-class TestDataFrameSortIndexKinds:
- def test_sort_index_multicolumn(self):
- A = np.arange(5).repeat(20)
- B = np.tile(np.arange(5), 20)
- random.shuffle(A)
- random.shuffle(B)
- frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
-
- result = frame.sort_values(by=["A", "B"])
- indexer = np.lexsort((frame["B"], frame["A"]))
- expected = frame.take(indexer)
- tm.assert_frame_equal(result, expected)
-
- result = frame.sort_values(by=["A", "B"], ascending=False)
- indexer = np.lexsort(
- (frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
- )
- expected = frame.take(indexer)
- tm.assert_frame_equal(result, expected)
-
- result = frame.sort_values(by=["B", "A"])
- indexer = np.lexsort((frame["A"], frame["B"]))
- expected = frame.take(indexer)
- tm.assert_frame_equal(result, expected)
-
- def test_sort_index_inplace(self):
- frame = DataFrame(
- np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
- )
-
- # axis=0
- unordered = frame.loc[[3, 2, 4, 1]]
- a_id = id(unordered["A"])
- df = unordered.copy()
- df.sort_index(inplace=True)
- expected = frame
- tm.assert_frame_equal(df, expected)
- assert a_id != id(df["A"])
-
- df = unordered.copy()
- df.sort_index(ascending=False, inplace=True)
- expected = frame[::-1]
- tm.assert_frame_equal(df, expected)
-
- # axis=1
- unordered = frame.loc[:, ["D", "B", "C", "A"]]
- df = unordered.copy()
- df.sort_index(axis=1, inplace=True)
- expected = frame
- tm.assert_frame_equal(df, expected)
-
- df = unordered.copy()
- df.sort_index(axis=1, ascending=False, inplace=True)
- expected = frame.iloc[:, ::-1]
- tm.assert_frame_equal(df, expected)
-
- def test_sort_index_different_sortorder(self):
- A = np.arange(20).repeat(5)
- B = np.tile(np.arange(5), 20)
-
- indexer = np.random.permutation(100)
- A = A.take(indexer)
- B = B.take(indexer)
-
- df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
-
- ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
- expected = df.take(ex_indexer)
-
- # test with multiindex, too
- idf = df.set_index(["A", "B"])
-
- result = idf.sort_index(ascending=[1, 0])
- expected = idf.take(ex_indexer)
- tm.assert_frame_equal(result, expected)
-
- # also, Series!
- result = idf["C"].sort_index(ascending=[1, 0])
- tm.assert_series_equal(result, expected["C"])
-
- def test_sort_index_level(self):
- mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
- df = DataFrame([[1, 2], [3, 4]], mi)
-
- result = df.sort_index(level="A", sort_remaining=False)
- expected = df
- tm.assert_frame_equal(result, expected)
-
- result = df.sort_index(level=["A", "B"], sort_remaining=False)
- expected = df
- tm.assert_frame_equal(result, expected)
-
- # Error thrown by sort_index when
- # first index is sorted last (#26053)
- result = df.sort_index(level=["C", "B", "A"])
- expected = df.iloc[[1, 0]]
- tm.assert_frame_equal(result, expected)
-
- result = df.sort_index(level=["B", "C", "A"])
- expected = df.iloc[[1, 0]]
- tm.assert_frame_equal(result, expected)
-
- result = df.sort_index(level=["C", "A"])
- expected = df.iloc[[1, 0]]
- tm.assert_frame_equal(result, expected)
-
- def test_sort_index_categorical_index(self):
-
- df = DataFrame(
- {
- "A": np.arange(6, dtype="int64"),
- "B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
- }
- ).set_index("B")
-
- result = df.sort_index()
- expected = df.iloc[[4, 0, 1, 5, 2, 3]]
- tm.assert_frame_equal(result, expected)
-
- result = df.sort_index(ascending=False)
- expected = df.iloc[[2, 3, 0, 1, 5, 4]]
- tm.assert_frame_equal(result, expected)
-
- def test_sort_index(self):
- # GH13496
-
- frame = DataFrame(
- np.arange(16).reshape(4, 4),
- index=[1, 2, 3, 4],
- columns=["A", "B", "C", "D"],
- )
-
- # axis=0 : sort rows by index labels
- unordered = frame.loc[[3, 2, 4, 1]]
- result = unordered.sort_index(axis=0)
- expected = frame
- tm.assert_frame_equal(result, expected)
-
- result = unordered.sort_index(ascending=False)
- expected = frame[::-1]
- tm.assert_frame_equal(result, expected)
-
- # axis=1 : sort columns by column names
- unordered = frame.iloc[:, [2, 1, 3, 0]]
- result = unordered.sort_index(axis=1)
- tm.assert_frame_equal(result, frame)
-
- result = unordered.sort_index(axis=1, ascending=False)
- expected = frame.iloc[:, ::-1]
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("level", ["A", 0]) # GH 21052
- def test_sort_index_multiindex(self, level):
- # GH13496
-
- # sort rows by specified level of multi-index
- mi = MultiIndex.from_tuples(
- [[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
- )
- df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
-
- expected_mi = MultiIndex.from_tuples(
- [[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
- )
- expected = pd.DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
- result = df.sort_index(level=level)
- tm.assert_frame_equal(result, expected)
-
- # sort_remaining=False
- expected_mi = MultiIndex.from_tuples(
- [[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
- )
- expected = pd.DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
- result = df.sort_index(level=level, sort_remaining=False)
- tm.assert_frame_equal(result, expected)
-
- def test_sort_index_intervalindex(self):
- # this is a de-facto sort via unstack
- # confirming that we sort in the order of the bins
- y = Series(np.random.randn(100))
- x1 = Series(np.sign(np.random.randn(100)))
- x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
- model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
-
- result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
- expected = IntervalIndex.from_tuples(
- [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
- )
- result = result.columns.levels[1].categories
- tm.assert_index_equal(result, expected)
-
- def test_sort_index_na_position_with_categories(self):
- # GH 22556
+ def test_sort_values_na_position_with_categories(self):
+ # GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
@@ -654,7 +427,27 @@ def test_sort_index_na_position_with_categories(self):
tm.assert_frame_equal(result, expected)
- def test_sort_index_na_position_with_categories_raises(self):
+ def test_sort_values_nat(self):
+
+ # GH#16836
+
+ d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
+ d2 = [
+ Timestamp(x)
+ for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
+ ]
+ df = pd.DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
+
+ d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
+ d4 = [
+ Timestamp(x)
+ for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
+ ]
+ expected = pd.DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
+ sorted_df = df.sort_values(by=["a", "b"])
+ tm.assert_frame_equal(sorted_df, expected)
+
+ def test_sort_values_na_position_with_categories_raises(self):
df = pd.DataFrame(
{
"c": pd.Categorical(
@@ -667,26 +460,3 @@ def test_sort_index_na_position_with_categories_raises(self):
with pytest.raises(ValueError):
df.sort_values(by="c", ascending=False, na_position="bad_position")
-
- def test_sort_multicolumn_uint64(self):
- # GH9918
- # uint64 multicolumn sort
-
- df = pd.DataFrame(
- {
- "a": pd.Series([18446637057563306014, 1162265347240853609]),
- "b": pd.Series([1, 2]),
- }
- )
- df["a"] = df["a"].astype(np.uint64)
- result = df.sort_values(["a", "b"])
-
- expected = pd.DataFrame(
- {
- "a": pd.Series([18446637057563306014, 1162265347240853609]),
- "b": pd.Series([1, 2]),
- },
- index=pd.Index([1, 0]),
- )
-
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_diff.py b/pandas/tests/series/methods/test_diff.py
index cf24c012ef349..9cb4ec827a271 100644
--- a/pandas/tests/series/methods/test_diff.py
+++ b/pandas/tests/series/methods/test_diff.py
@@ -1,3 +1,77 @@
-class TestDiff:
- # TODO: maybe we should have tests for this?
- pass
+import numpy as np
+import pytest
+
+from pandas import Series, TimedeltaIndex, date_range
+import pandas.util.testing as tm
+
+
+class TestSeriesDiff:
+ def test_diff_np(self):
+ pytest.skip("skipping due to Series no longer being an ndarray")
+
+ # no longer works as the return type of np.diff is now nd.array
+ s = Series(np.arange(5))
+
+ r = np.diff(s)
+ tm.assert_series_equal(Series([np.nan, 0, 0, 0, np.nan]), r)
+
+ def test_diff_int(self):
+ # int dtype
+ a = 10000000000000000
+ b = a + 1
+ s = Series([a, b])
+
+ result = s.diff()
+ assert result[1] == 1
+
+ def test_diff_tz(self):
+ # Combined datetime diff, normal diff and boolean diff test
+ ts = tm.makeTimeSeries(name="ts")
+ ts.diff()
+
+ # neg n
+ result = ts.diff(-1)
+ expected = ts - ts.shift(-1)
+ tm.assert_series_equal(result, expected)
+
+ # 0
+ result = ts.diff(0)
+ expected = ts - ts
+ tm.assert_series_equal(result, expected)
+
+ # datetime diff (GH#3100)
+ s = Series(date_range("20130102", periods=5))
+ result = s.diff()
+ expected = s - s.shift(1)
+ tm.assert_series_equal(result, expected)
+
+ # timedelta diff
+ result = result - result.shift(1) # previous result
+ expected = expected.diff() # previously expected
+ tm.assert_series_equal(result, expected)
+
+ # with tz
+ s = Series(
+ date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
+ )
+ result = s.diff()
+ expected = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "input,output,diff",
+ [([False, True, True, False, False], [np.nan, True, False, True, False], 1)],
+ )
+ def test_diff_bool(self, input, output, diff):
+ # boolean series (test for fixing #17294)
+ s = Series(input)
+ result = s.diff()
+ expected = Series(output)
+ tm.assert_series_equal(result, expected)
+
+ def test_diff_object_dtype(self):
+ # object series
+ s = Series([False, True, 5.0, np.nan, True, False])
+ result = s.diff()
+ expected = s - s.shift(1)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py
index ad75012d8e621..2cf847c928862 100644
--- a/pandas/tests/series/methods/test_shift.py
+++ b/pandas/tests/series/methods/test_shift.py
@@ -220,3 +220,46 @@ def test_tshift(self, datetime_series):
msg = "Freq was not given and was not set in the index"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
+
+ def test_shift_int(self, datetime_series):
+ ts = datetime_series.astype(int)
+ shifted = ts.shift(1)
+ expected = ts.astype(float).shift(1)
+ tm.assert_series_equal(shifted, expected)
+
+ def test_shift_object_non_scalar_fill(self):
+ # shift requires scalar fill_value except for object dtype
+ ser = Series(range(3))
+ with pytest.raises(ValueError, match="fill_value must be a scalar"):
+ ser.shift(1, fill_value=[])
+
+ df = ser.to_frame()
+ with pytest.raises(ValueError, match="fill_value must be a scalar"):
+ df.shift(1, fill_value=np.arange(3))
+
+ obj_ser = ser.astype(object)
+ result = obj_ser.shift(1, fill_value={})
+ assert result[0] == {}
+
+ obj_df = obj_ser.to_frame()
+ result = obj_df.shift(1, fill_value={})
+ assert result.iloc[0, 0] == {}
+
+ def test_shift_categorical(self):
+ # GH#9416
+ s = pd.Series(["a", "b", "c", "d"], dtype="category")
+
+ tm.assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
+
+ sp1 = s.shift(1)
+ tm.assert_index_equal(s.index, sp1.index)
+ assert np.all(sp1.values.codes[:1] == -1)
+ assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
+
+ sn2 = s.shift(-2)
+ tm.assert_index_equal(s.index, sn2.index)
+ assert np.all(sn2.values.codes[-2:] == -1)
+ assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
+
+ tm.assert_index_equal(s.values.categories, sp1.values.categories)
+ tm.assert_index_equal(s.values.categories, sn2.values.categories)
diff --git a/pandas/tests/series/methods/test_sort_index.py b/pandas/tests/series/methods/test_sort_index.py
new file mode 100644
index 0000000000000..ab15b8c814029
--- /dev/null
+++ b/pandas/tests/series/methods/test_sort_index.py
@@ -0,0 +1,137 @@
+import random
+
+import numpy as np
+import pytest
+
+from pandas import IntervalIndex, MultiIndex, Series
+import pandas.util.testing as tm
+
+
+class TestSeriesSortIndex:
+ def test_sort_index(self, datetime_series):
+ rindex = list(datetime_series.index)
+ random.shuffle(rindex)
+
+ random_order = datetime_series.reindex(rindex)
+ sorted_series = random_order.sort_index()
+ tm.assert_series_equal(sorted_series, datetime_series)
+
+ # descending
+ sorted_series = random_order.sort_index(ascending=False)
+ tm.assert_series_equal(
+ sorted_series, datetime_series.reindex(datetime_series.index[::-1])
+ )
+
+ # compat on level
+ sorted_series = random_order.sort_index(level=0)
+ tm.assert_series_equal(sorted_series, datetime_series)
+
+ # compat on axis
+ sorted_series = random_order.sort_index(axis=0)
+ tm.assert_series_equal(sorted_series, datetime_series)
+
+ msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>"
+ with pytest.raises(ValueError, match=msg):
+ random_order.sort_values(axis=1)
+
+ sorted_series = random_order.sort_index(level=0, axis=0)
+ tm.assert_series_equal(sorted_series, datetime_series)
+
+ with pytest.raises(ValueError, match=msg):
+ random_order.sort_index(level=0, axis=1)
+
+ def test_sort_index_inplace(self, datetime_series):
+
+ # For GH#11402
+ rindex = list(datetime_series.index)
+ random.shuffle(rindex)
+
+ # descending
+ random_order = datetime_series.reindex(rindex)
+ result = random_order.sort_index(ascending=False, inplace=True)
+
+ assert result is None
+ tm.assert_series_equal(
+ random_order, datetime_series.reindex(datetime_series.index[::-1])
+ )
+
+ # ascending
+ random_order = datetime_series.reindex(rindex)
+ result = random_order.sort_index(ascending=True, inplace=True)
+
+ assert result is None
+ tm.assert_series_equal(random_order, datetime_series)
+
+ def test_sort_index_level(self):
+ mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
+ s = Series([1, 2], mi)
+ backwards = s.iloc[[1, 0]]
+
+ res = s.sort_index(level="A")
+ tm.assert_series_equal(backwards, res)
+
+ res = s.sort_index(level=["A", "B"])
+ tm.assert_series_equal(backwards, res)
+
+ res = s.sort_index(level="A", sort_remaining=False)
+ tm.assert_series_equal(s, res)
+
+ res = s.sort_index(level=["A", "B"], sort_remaining=False)
+ tm.assert_series_equal(s, res)
+
+ @pytest.mark.parametrize("level", ["A", 0]) # GH#21052
+ def test_sort_index_multiindex(self, level):
+
+ mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
+ s = Series([1, 2], mi)
+ backwards = s.iloc[[1, 0]]
+
+ # implicit sort_remaining=True
+ res = s.sort_index(level=level)
+ tm.assert_series_equal(backwards, res)
+
+ # GH#13496
+ # sort has no effect without remaining lvls
+ res = s.sort_index(level=level, sort_remaining=False)
+ tm.assert_series_equal(s, res)
+
+ def test_sort_index_kind(self):
+ # GH#14444 & GH#13589: Add support for sort algo choosing
+ series = Series(index=[3, 2, 1, 4, 3], dtype=object)
+ expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
+
+ index_sorted_series = series.sort_index(kind="mergesort")
+ tm.assert_series_equal(expected_series, index_sorted_series)
+
+ index_sorted_series = series.sort_index(kind="quicksort")
+ tm.assert_series_equal(expected_series, index_sorted_series)
+
+ index_sorted_series = series.sort_index(kind="heapsort")
+ tm.assert_series_equal(expected_series, index_sorted_series)
+
+ def test_sort_index_na_position(self):
+ series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
+ expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
+
+ index_sorted_series = series.sort_index(na_position="first")
+ tm.assert_series_equal(expected_series_first, index_sorted_series)
+
+ expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
+
+ index_sorted_series = series.sort_index(na_position="last")
+ tm.assert_series_equal(expected_series_last, index_sorted_series)
+
+ def test_sort_index_intervals(self):
+ s = Series(
+ [np.nan, 1, 2, 3], IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4])
+ )
+
+ result = s.sort_index()
+ expected = s
+ tm.assert_series_equal(result, expected)
+
+ result = s.sort_index(ascending=False)
+ expected = Series(
+ [3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1])
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/methods/test_sort_values.py
similarity index 57%
rename from pandas/tests/series/test_sorting.py
rename to pandas/tests/series/methods/test_sort_values.py
index fd3445e271699..ec3b8385e79e7 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/methods/test_sort_values.py
@@ -1,13 +1,11 @@
-import random
-
import numpy as np
import pytest
-from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series
+from pandas import Categorical, DataFrame, Series
import pandas.util.testing as tm
-class TestSeriesSorting:
+class TestSeriesSortValues:
def test_sort_values(self, datetime_series):
# check indexes are reordered corresponding with the values
@@ -73,7 +71,7 @@ def test_sort_values(self, datetime_series):
ts.index, datetime_series.sort_values(ascending=False).index
)
- # GH 5856/5853
+ # GH#5856/5853
# Series.sort_values operating on a view
df = DataFrame(np.random.randn(10, 4))
s = df.iloc[:, 0]
@@ -85,117 +83,6 @@ def test_sort_values(self, datetime_series):
with pytest.raises(ValueError, match=msg):
s.sort_values(inplace=True)
- def test_sort_index(self, datetime_series):
- rindex = list(datetime_series.index)
- random.shuffle(rindex)
-
- random_order = datetime_series.reindex(rindex)
- sorted_series = random_order.sort_index()
- tm.assert_series_equal(sorted_series, datetime_series)
-
- # descending
- sorted_series = random_order.sort_index(ascending=False)
- tm.assert_series_equal(
- sorted_series, datetime_series.reindex(datetime_series.index[::-1])
- )
-
- # compat on level
- sorted_series = random_order.sort_index(level=0)
- tm.assert_series_equal(sorted_series, datetime_series)
-
- # compat on axis
- sorted_series = random_order.sort_index(axis=0)
- tm.assert_series_equal(sorted_series, datetime_series)
-
- msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>"
- with pytest.raises(ValueError, match=msg):
- random_order.sort_values(axis=1)
-
- sorted_series = random_order.sort_index(level=0, axis=0)
- tm.assert_series_equal(sorted_series, datetime_series)
-
- with pytest.raises(ValueError, match=msg):
- random_order.sort_index(level=0, axis=1)
-
- def test_sort_index_inplace(self, datetime_series):
-
- # For #11402
- rindex = list(datetime_series.index)
- random.shuffle(rindex)
-
- # descending
- random_order = datetime_series.reindex(rindex)
- result = random_order.sort_index(ascending=False, inplace=True)
-
- assert result is None
- tm.assert_series_equal(
- random_order, datetime_series.reindex(datetime_series.index[::-1])
- )
-
- # ascending
- random_order = datetime_series.reindex(rindex)
- result = random_order.sort_index(ascending=True, inplace=True)
-
- assert result is None
- tm.assert_series_equal(random_order, datetime_series)
-
- @pytest.mark.parametrize("level", ["A", 0]) # GH 21052
- def test_sort_index_multiindex(self, level):
-
- mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
- s = Series([1, 2], mi)
- backwards = s.iloc[[1, 0]]
-
- # implicit sort_remaining=True
- res = s.sort_index(level=level)
- tm.assert_series_equal(backwards, res)
-
- # GH13496
- # sort has no effect without remaining lvls
- res = s.sort_index(level=level, sort_remaining=False)
- tm.assert_series_equal(s, res)
-
- def test_sort_index_kind(self):
- # GH #14444 & #13589: Add support for sort algo choosing
- series = Series(index=[3, 2, 1, 4, 3], dtype=object)
- expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
-
- index_sorted_series = series.sort_index(kind="mergesort")
- tm.assert_series_equal(expected_series, index_sorted_series)
-
- index_sorted_series = series.sort_index(kind="quicksort")
- tm.assert_series_equal(expected_series, index_sorted_series)
-
- index_sorted_series = series.sort_index(kind="heapsort")
- tm.assert_series_equal(expected_series, index_sorted_series)
-
- def test_sort_index_na_position(self):
- series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
- expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
-
- index_sorted_series = series.sort_index(na_position="first")
- tm.assert_series_equal(expected_series_first, index_sorted_series)
-
- expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
-
- index_sorted_series = series.sort_index(na_position="last")
- tm.assert_series_equal(expected_series_last, index_sorted_series)
-
- def test_sort_index_intervals(self):
- s = Series(
- [np.nan, 1, 2, 3], IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4])
- )
-
- result = s.sort_index()
- expected = s
- tm.assert_series_equal(result, expected)
-
- result = s.sort_index(ascending=False)
- expected = Series(
- [3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1])
- )
- tm.assert_series_equal(result, expected)
-
def test_sort_values_categorical(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
@@ -253,7 +140,7 @@ def test_sort_values_categorical(self):
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
- # GH 7848
+ # GH#7848
df = DataFrame(
{"id": [6, 5, 4, 3, 2, 1], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 86931ae23caee..b43dcc5e52c55 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -6,9 +6,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import Categorical, DataFrame, MultiIndex, Series, date_range, isna
-from pandas.core.indexes.datetimes import Timestamp
-from pandas.core.indexes.timedeltas import TimedeltaIndex
+from pandas import Categorical, DataFrame, MultiIndex, Series, Timestamp, isna
import pandas.util.testing as tm
@@ -50,76 +48,6 @@ def test_argsort_stable(self):
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
- def test_np_diff(self):
- pytest.skip("skipping due to Series no longer being an ndarray")
-
- # no longer works as the return type of np.diff is now nd.array
- s = Series(np.arange(5))
-
- r = np.diff(s)
- tm.assert_series_equal(Series([np.nan, 0, 0, 0, np.nan]), r)
-
- def test_int_diff(self):
- # int dtype
- a = 10000000000000000
- b = a + 1
- s = Series([a, b])
-
- result = s.diff()
- assert result[1] == 1
-
- def test_tz_diff(self):
- # Combined datetime diff, normal diff and boolean diff test
- ts = tm.makeTimeSeries(name="ts")
- ts.diff()
-
- # neg n
- result = ts.diff(-1)
- expected = ts - ts.shift(-1)
- tm.assert_series_equal(result, expected)
-
- # 0
- result = ts.diff(0)
- expected = ts - ts
- tm.assert_series_equal(result, expected)
-
- # datetime diff (GH3100)
- s = Series(date_range("20130102", periods=5))
- result = s.diff()
- expected = s - s.shift(1)
- tm.assert_series_equal(result, expected)
-
- # timedelta diff
- result = result - result.shift(1) # previous result
- expected = expected.diff() # previously expected
- tm.assert_series_equal(result, expected)
-
- # with tz
- s = Series(
- date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
- )
- result = s.diff()
- expected = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize(
- "input,output,diff",
- [([False, True, True, False, False], [np.nan, True, False, True, False], 1)],
- )
- def test_bool_diff(self, input, output, diff):
- # boolean series (test for fixing #17294)
- s = Series(input)
- result = s.diff()
- expected = Series(output)
- tm.assert_series_equal(result, expected)
-
- def test_obj_diff(self):
- # object series
- s = Series([False, True, 5.0, np.nan, True, False])
- result = s.diff()
- expected = s - s.shift(1)
- tm.assert_series_equal(result, expected)
-
def _check_accum_op(self, name, datetime_series_, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
@@ -550,23 +478,6 @@ def test_is_monotonic(self):
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
- def test_sort_index_level(self):
- mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
- s = Series([1, 2], mi)
- backwards = s.iloc[[1, 0]]
-
- res = s.sort_index(level="A")
- tm.assert_series_equal(backwards, res)
-
- res = s.sort_index(level=["A", "B"])
- tm.assert_series_equal(backwards, res)
-
- res = s.sort_index(level="A", sort_remaining=False)
- tm.assert_series_equal(s, res)
-
- res = s.sort_index(level=["A", "B"], sort_remaining=False)
- tm.assert_series_equal(s, res)
-
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = pd.Series(values, name="XX", index=list("abcdefg"))
@@ -584,49 +495,6 @@ def test_apply_categorical(self):
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
- def test_shift_int(self, datetime_series):
- ts = datetime_series.astype(int)
- shifted = ts.shift(1)
- expected = ts.astype(float).shift(1)
- tm.assert_series_equal(shifted, expected)
-
- def test_shift_object_non_scalar_fill(self):
- # shift requires scalar fill_value except for object dtype
- ser = Series(range(3))
- with pytest.raises(ValueError, match="fill_value must be a scalar"):
- ser.shift(1, fill_value=[])
-
- df = ser.to_frame()
- with pytest.raises(ValueError, match="fill_value must be a scalar"):
- df.shift(1, fill_value=np.arange(3))
-
- obj_ser = ser.astype(object)
- result = obj_ser.shift(1, fill_value={})
- assert result[0] == {}
-
- obj_df = obj_ser.to_frame()
- result = obj_df.shift(1, fill_value={})
- assert result.iloc[0, 0] == {}
-
- def test_shift_categorical(self):
- # GH 9416
- s = pd.Series(["a", "b", "c", "d"], dtype="category")
-
- tm.assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
-
- sp1 = s.shift(1)
- tm.assert_index_equal(s.index, sp1.index)
- assert np.all(sp1.values.codes[:1] == -1)
- assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
-
- sn2 = s.shift(-2)
- tm.assert_index_equal(s.index, sn2.index)
- assert np.all(sn2.values.codes[-2:] == -1)
- assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
-
- tm.assert_index_equal(s.values.categories, sp1.values.categories)
- tm.assert_index_equal(s.values.categories, sn2.values.categories)
-
def test_unstack(self):
index = MultiIndex(
| among other things, found a sort_index test that got misplaced | https://api.github.com/repos/pandas-dev/pandas/pulls/30432 | 2019-12-23T18:07:15Z | 2019-12-23T19:29:37Z | 2019-12-23T19:29:37Z | 2019-12-23T20:17:53Z |
CI: Remove powershell scripts (Azure) | diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index 55f80bf644ecc..cb0b17e3553a4 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -69,20 +69,13 @@ jobs:
displayName: 'Build versions'
- task: PublishTestResults@2
+ condition: succeededOrFailed()
inputs:
+ failTaskOnFailedTests: true
testResultsFiles: 'test-data.xml'
testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
displayName: 'Publish test results'
- - powershell: |
- $(Get-Content "test-data.xml" | Out-String) -match 'failures="(.*?)"'
- if ($matches[1] -eq 0) {
- Write-Host "No test failures in test-data"
- } else {
- Write-Error "$($matches[1]) tests failed" # will produce $LASTEXITCODE=1
- }
- displayName: 'Check for test failures'
-
- script: |
source activate pandas-dev
python ci/print_skipped.py
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
index 536c2fdbf1353..03529bd6569c6 100644
--- a/ci/azure/windows.yml
+++ b/ci/azure/windows.yml
@@ -23,33 +23,34 @@ jobs:
Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin"
displayName: 'Add conda to PATH'
+
- script: conda update -q -n base conda
displayName: 'Update conda'
+
- bash: |
conda env create -q --file ci\\deps\\azure-windows-$(CONDA_PY).yaml
displayName: 'Create anaconda environment'
+
- bash: |
source activate pandas-dev
conda list
python setup.py build_ext -q -i
python -m pip install --no-build-isolation -e .
displayName: 'Build'
+
- bash: |
source activate pandas-dev
ci/run_tests.sh
displayName: 'Test'
+
- task: PublishTestResults@2
+ condition: succeededOrFailed()
inputs:
+ failTaskOnFailedTests: true
testResultsFiles: 'test-data.xml'
- testRunTitle: 'Windows-$(CONDA_PY)'
- - powershell: |
- $(Get-Content "test-data.xml" | Out-String) -match 'failures="(.*?)"'
- if ($matches[1] -eq 0) {
- Write-Host "No test failures in test-data"
- } else {
- Write-Error "$($matches[1]) tests failed" # will produce $LASTEXITCODE=1
- }
- displayName: 'Check for test failures'
+ testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
+ displayName: 'Publish test results'
+
- bash: |
source activate pandas-dev
python ci/print_skipped.py
| - [x] closes #26344
Ref @jbrockmendel comment here https://github.com/pandas-dev/pandas/pull/23454#discussion_r360933924
CI: fails as expected example [here](https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=23715&view=logs&j=fd89438c-1fd3-5981-222e-115d06c89c2d) | https://api.github.com/repos/pandas-dev/pandas/pulls/30431 | 2019-12-23T17:09:57Z | 2019-12-24T14:56:57Z | 2019-12-24T14:56:56Z | 2019-12-25T20:35:59Z |
CLN: use f-string for JSON related files | diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 7444ebbaf27e3..b28d23791fd03 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -25,7 +25,6 @@
infer_compression,
stringify_path,
)
-from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import _validate_integer
from ._normalize import convert_to_line_delimits
@@ -175,10 +174,7 @@ class SeriesWriter(Writer):
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == "index":
- raise ValueError(
- "Series index must be unique for orient="
- "'{orient}'".format(orient=self.orient)
- )
+ raise ValueError(f"Series index must be unique for orient='{self.orient}'")
def _write(
self,
@@ -214,8 +210,7 @@ def _format_axes(self):
"""
if not self.obj.index.is_unique and self.orient in ("index", "columns"):
raise ValueError(
- "DataFrame index must be unique for orient="
- "'{orient}'.".format(orient=self.orient)
+ f"DataFrame index must be unique for orient='{self.orient}'."
)
if not self.obj.columns.is_unique and self.orient in (
"index",
@@ -223,8 +218,7 @@ def _format_axes(self):
"records",
):
raise ValueError(
- "DataFrame columns must be unique for orient="
- "'{orient}'.".format(orient=self.orient)
+ f"DataFrame columns must be unique for orient='{self.orient}'."
)
def _write(
@@ -290,8 +284,8 @@ def __init__(
if date_format != "iso":
msg = (
"Trying to write with `orient='table'` and "
- "`date_format='{fmt}'`. Table Schema requires dates "
- "to be formatted with `date_format='iso'`".format(fmt=date_format)
+ f"`date_format='{date_format}'`. Table Schema requires dates "
+ "to be formatted with `date_format='iso'`"
)
raise ValueError(msg)
@@ -828,9 +822,7 @@ def __init__(
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
- raise ValueError(
- "date_unit must be one of {units}".format(units=self._STAMP_UNITS)
- )
+ raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}")
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS["s"]
@@ -850,11 +842,7 @@ def check_keys_split(self, decoded):
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
- raise ValueError(
- "JSON data had unexpected key(s): {bad_keys}".format(
- bad_keys=pprint_thing(bad_keys)
- )
- )
+ raise ValueError(f"JSON data had unexpected key(s): {bad_keys}")
def parse(self):
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index 3c9c906939e8f..aa14c3f3a63f3 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -309,7 +309,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
raise KeyError(
"Try running with "
"errors='ignore' as key "
- "{err} is not always present".format(err=e)
+ f"{e} is not always present"
)
meta_vals[key].append(meta_val)
records.extend(recs)
@@ -319,7 +319,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
result = DataFrame(records)
if record_prefix is not None:
- result = result.rename(columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
+ result = result.rename(columns=lambda x: f"{record_prefix}{x}")
# Data types, a problem
for k, v in meta_vals.items():
@@ -328,8 +328,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
if k in result:
raise ValueError(
- "Conflicting metadata name {name}, "
- "need distinguishing prefix ".format(name=k)
+ f"Conflicting metadata name {k}, need distinguishing prefix "
)
result[k] = np.array(v, dtype=object).repeat(lengths)
return result
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 1e27421a55499..bc5a9783391a4 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -89,7 +89,7 @@ def set_default_names(data):
data = data.copy()
if data.index.nlevels > 1:
names = [
- name if name is not None else "level_{}".format(i)
+ name if name is not None else f"level_{i}"
for i, name in enumerate(data.index.names)
]
data.index.names = names
@@ -175,7 +175,7 @@ def convert_json_field_to_pandas_type(field):
return "timedelta64"
elif typ == "datetime":
if field.get("tz"):
- return "datetime64[ns, {tz}]".format(tz=field["tz"])
+ return f"datetime64[ns, {field['tz']}]"
else:
return "datetime64[ns]"
elif typ == "any":
@@ -186,7 +186,7 @@ def convert_json_field_to_pandas_type(field):
else:
return "object"
- raise ValueError("Unsupported or invalid field type: {}".format(typ))
+ raise ValueError(f"Unsupported or invalid field type: {typ}")
def build_table_schema(data, index=True, primary_key=None, version=True):
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 7e027a65eec3a..16a4caa7d7ebe 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -93,6 +93,7 @@ def assert_series_equal(self, left, right, **kwargs):
tm.assert_series_equal(left, right, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
+ obj_type = kwargs.get("obj", "DataFrame")
tm.assert_index_equal(
left.columns,
right.columns,
@@ -100,7 +101,7 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
check_names=kwargs.get("check_names", True),
check_exact=kwargs.get("check_exact", False),
check_categorical=kwargs.get("check_categorical", True),
- obj="{obj}.columns".format(obj=kwargs.get("obj", "DataFrame")),
+ obj=f"{obj_type}.columns",
)
jsons = (left.dtypes == "json").index
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index adbb9dfbd2ddf..5c5c04c35d6b7 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -90,10 +90,7 @@ def test_to_json_compression(compression_only, read_infer, to_infer):
compression = compression_only
if compression == "zip":
- pytest.skip(
- "{compression} is not supported "
- "for to_csv".format(compression=compression)
- )
+ pytest.skip(f"{compression} is not supported for to_csv")
# We'll complete file extension subsequently.
filename = "test."
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 49f666344dfa2..fba74d8ebcf97 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -513,7 +513,7 @@ def test_convert_json_field_to_pandas_type(self, inp, exp):
def test_convert_json_field_to_pandas_type_raises(self, inp):
field = {"type": inp}
with pytest.raises(
- ValueError, match=("Unsupported or invalid field type: {}".format(inp))
+ ValueError, match=f"Unsupported or invalid field type: {inp}"
):
convert_json_field_to_pandas_type(field)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index bce3d1de849aa..6489fedad03e3 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -105,7 +105,7 @@ def test_frame_non_unique_index(self, orient):
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
- msg = "DataFrame index must be unique for orient='{}'".format(orient)
+ msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@@ -142,7 +142,7 @@ def test_frame_non_unique_columns(self, orient, data):
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
- msg = "DataFrame columns must be unique for orient='{}'".format(orient)
+ msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@@ -225,13 +225,11 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
- pytest.xfail(
- "Can't have duplicate index values for orient '{}')".format(orient)
- )
+ pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
- pytest.xfail("Orient {} is broken with numpy=True".format(orient))
+ pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
@@ -399,7 +397,7 @@ def test_frame_infinity(self, orient, inf, dtype):
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
- assert encoded == '{{"a_float":{{"0":{}}}}}'.format(expected_val)
+ assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
@@ -593,7 +591,7 @@ def __str__(self) -> str:
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
- assert df_printable.to_json() == '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
+ assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
@@ -607,19 +605,19 @@ def __str__(self) -> str:
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
- assert df_nonprintable.to_json(
- default_handler=str
- ) == '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
- assert df_mixed.to_json(
- default_handler=str
- ) == '{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
+ result = df_nonprintable.to_json(default_handler=str)
+ expected = f'{{"A":{{"0":"{hexed}"}}}}'
+ assert result == expected
+ assert (
+ df_mixed.to_json(default_handler=str)
+ == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
+ )
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
- df = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]})
- assert df.to_json() == '{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
- bar=("bar" * 100000)
- )
+ result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
+ expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
+ assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
@@ -1431,7 +1429,7 @@ def test_read_timezone_information(self):
)
def test_timedelta_as_label(self, date_format, key):
df = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")])
- expected = '{{"{key}":{{"0":1}}}}'.format(key=key)
+ expected = f'{{"{key}":{{"0":1}}}}'
result = df.to_json(date_format=date_format)
assert result == expected
@@ -1460,7 +1458,7 @@ def test_to_json_indent(self, indent):
result = df.to_json(indent=indent)
spaces = " " * indent
- expected = """{{
+ expected = f"""{{
{spaces}"a":{{
{spaces}{spaces}"0":"foo",
{spaces}{spaces}"1":"baz"
@@ -1469,9 +1467,7 @@ def test_to_json_indent(self, indent):
{spaces}{spaces}"0":"bar",
{spaces}{spaces}"1":"qux"
{spaces}}}
-}}""".format(
- spaces=spaces
- )
+}}"""
assert result == expected
diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py
index c4e03e24a7495..b85032904c5ec 100644
--- a/pandas/tests/io/json/test_readlines.py
+++ b/pandas/tests/io/json/test_readlines.py
@@ -134,10 +134,7 @@ def test_readjson_chunks_closes(chunksize):
reader.read()
assert (
reader.open_stream.closed
- ), "didn't close stream with \
- chunksize = {chunksize}".format(
- chunksize=chunksize
- )
+ ), f"didn't close stream with chunksize = {chunksize}"
@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
@@ -170,9 +167,7 @@ def test_readjson_chunks_multiple_empty_lines(chunksize):
test = pd.read_json(j, lines=True, chunksize=chunksize)
if chunksize is not None:
test = pd.concat(test)
- tm.assert_frame_equal(
- orig, test, obj="chunksize: {chunksize}".format(chunksize=chunksize)
- )
+ tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}")
def test_readjson_unicode(monkeypatch):
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index bb150c5825650..6008f6b651c2a 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -362,21 +362,21 @@ def test_encode_date_conversion(self):
)
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
- expected = '"{iso}"'.format(iso=test.isoformat())
+ expected = f'"{test.isoformat()}"'
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
- expected = '"{iso}"'.format(iso=test.isoformat())
+ expected = f'"{test.isoformat()}"'
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
- expected = '"{iso}"'.format(iso=test.isoformat())
+ expected = f'"{test.isoformat()}"'
assert expected == output
@pytest.mark.parametrize(
@@ -580,7 +580,7 @@ class Nested:
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
- doc = '{{"id": {val}}}'.format(val=val)
+ doc = f'{{"id": {val}}}'
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
| - [x] ref #29547
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/30430 | 2019-12-23T17:01:30Z | 2019-12-24T17:06:19Z | 2019-12-24T17:06:19Z | 2020-06-23T17:09:06Z |
CLN: Use of Iterable from collections.abc instead of typing.Iterable | diff --git a/pandas/_typing.py b/pandas/_typing.py
index 445eff9e19e47..69b08c581cff9 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -3,8 +3,8 @@
IO,
TYPE_CHECKING,
AnyStr,
+ Collection,
Dict,
- Iterable,
List,
Optional,
TypeVar,
@@ -37,8 +37,7 @@
Ordered = Optional[bool]
JSONSerializable = Union[Scalar, List, Dict]
-# use Collection after we drop support for py35
-Axes = Iterable
+Axes = Collection
# to maintain type information across generic functions and parametrization
_T = TypeVar("_T")
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30427 | 2019-12-23T10:40:14Z | 2019-12-24T16:14:25Z | 2019-12-24T16:14:25Z | 2019-12-24T16:16:53Z |
CLN: changed .format to fstring in pandas/io/formats/csvs.py | diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 3a91d65ab4562..72ba1a892cb8f 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -238,10 +238,7 @@ def _save_header(self):
if has_aliases:
if len(header) != len(cols):
raise ValueError(
- (
- "Writing {ncols} cols but got {nalias} "
- "aliases".format(ncols=len(cols), nalias=len(header))
- )
+ f"Writing {len(cols)} cols but got {len(header)} aliases"
)
else:
write_cols = header
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
https://github.com/pandas-dev/pandas/issues/29547
My first contribution. | https://api.github.com/repos/pandas-dev/pandas/pulls/30425 | 2019-12-23T07:21:05Z | 2019-12-24T15:04:40Z | 2019-12-24T15:04:40Z | 2019-12-24T15:04:43Z |
ISORT: Add _typing to pre_core isort block | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 53ae1094c6765..96a4eb1b3bf32 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -10,6 +10,7 @@
import numpy as np
+from pandas._typing import ArrayLike
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -21,7 +22,6 @@
from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
-from pandas._typing import ArrayLike
from pandas.core import ops
from pandas.core.algorithms import _factorize_array, unique
from pandas.core.missing import backfill_1d, pad_1d
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 6b422adef2d68..4d6be8221557d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -8,6 +8,7 @@
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
+from pandas._typing import ArrayLike, Dtype, Ordered
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
@@ -41,7 +42,6 @@
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
-from pandas._typing import ArrayLike, Dtype, Ordered
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index f5d1e62f44fd0..045e511e32586 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -10,6 +10,7 @@
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
+from pandas._typing import DatetimeLikeScalar
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
@@ -37,7 +38,6 @@
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
-from pandas._typing import DatetimeLikeScalar
from pandas.core import missing, nanops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
import pandas.core.common as com
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index ce16a1620eed5..e3e0064c84da3 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -5,6 +5,8 @@
import numpy as np
+from pandas._typing import Dtype
+
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
@@ -17,8 +19,6 @@
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna, na_value_for_dtype
-from pandas._typing import Dtype
-
@register_extension_dtype
class SparseDtype(ExtensionDtype):
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index b3b37ee3d0c98..cc8311cf3e21d 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -11,6 +11,7 @@
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
+from pandas._typing import ArrayLike, Dtype
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
@@ -44,7 +45,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas._typing import ArrayLike, Dtype
import pandas.core.common as com
if TYPE_CHECKING:
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 8e629896fdb7b..dc22a79a2f3fe 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -6,6 +6,7 @@
from pandas._libs import algos, lib
from pandas._libs.tslibs import conversion
+from pandas._typing import ArrayLike
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
@@ -46,8 +47,6 @@
is_sequence,
)
-from pandas._typing import ArrayLike
-
_POSSIBLY_CAST_DTYPES = {
np.dtype(t).name
for t in [
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 6f8f6e8abbc0a..b77cd34700f10 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -7,11 +7,10 @@
from pandas._libs.interval import Interval
from pandas._libs.tslibs import NaT, Period, Timestamp, timezones
+from pandas._typing import Ordered
from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCDateOffset, ABCIndexClass
-from pandas._typing import Ordered
-
from .base import ExtensionDtype
from .inference import is_bool, is_list_like
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 766437dbad8f8..51330bfc55dc3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -36,6 +36,7 @@
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib
+from pandas._typing import Axes, Dtype, FilePathOrBuffer
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
@@ -91,7 +92,6 @@
)
from pandas.core.dtypes.missing import isna, notna
-from pandas._typing import Axes, Dtype, FilePathOrBuffer
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 03bd1b331ec30..bea246c3f1b98 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -29,6 +29,7 @@
from pandas._config import config
from pandas._libs import Timestamp, iNaT, properties
+from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries, JSONSerializable
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
@@ -67,7 +68,6 @@
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
-from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries, JSONSerializable
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index eaa4f51c155a9..b2543289f68c6 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -25,6 +25,7 @@
import numpy as np
from pandas._libs import Timestamp, lib
+from pandas._typing import FrameOrSeries
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import (
@@ -47,7 +48,6 @@
)
from pandas.core.dtypes.missing import _isna_ndarraylike, isna, notna
-from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 609ae425658c3..b5325d8305249 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -33,6 +33,7 @@ class providing the base-class of operations.
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
+from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -50,7 +51,6 @@ class providing the base-class of operations.
)
from pandas.core.dtypes.missing import isna, notna
-from pandas._typing import FrameOrSeries, Scalar
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray, try_cast_to_ea
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index d7c3feef3711f..2c224a1bef338 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -7,6 +7,7 @@
import numpy as np
+from pandas._typing import FrameOrSeries
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
@@ -19,7 +20,6 @@
)
from pandas.core.dtypes.generic import ABCSeries
-from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.common as com
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index f123a5265bb0d..a99ebe77e8254 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -14,6 +14,7 @@
from pandas._libs import NaT, iNaT, lib
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
+from pandas._typing import FrameOrSeries
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
@@ -36,7 +37,6 @@
)
from pandas.core.dtypes.missing import _maybe_fill, isna
-from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.base import SelectionMixin
import pandas.core.common as com
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index d35117b8db86e..dd917a524e491 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -7,6 +7,7 @@
from pandas._libs import index as libindex
from pandas._libs.hashtable import duplicated_int64
+from pandas._typing import AnyArrayLike
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -22,7 +23,6 @@
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.missing import isna
-from pandas._typing import AnyArrayLike
from pandas.core import accessor
from pandas.core.algorithms import take_1d
from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index dee4c959f8c90..b61e80b9e89a7 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -9,6 +9,7 @@
from pandas._libs import Timedelta, Timestamp, lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
+from pandas._typing import AnyArrayLike
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._exceptions import rewrite_exception
@@ -36,7 +37,6 @@
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
-from pandas._typing import AnyArrayLike
from pandas.core.algorithms import take_1d
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index b84c69b8caf51..00d81f3ed95a9 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,6 +1,7 @@
import numpy as np
from pandas._libs import index as libindex, lib
+from pandas._typing import Dtype
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.cast import astype_nansafe
@@ -27,7 +28,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas._typing import Dtype
from pandas.core import algorithms
import pandas.core.common as com
from pandas.core.indexes.base import Index, InvalidIndexError, _index_shared_docs
diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py
index 6a2aba4264874..1eb952c1394ac 100644
--- a/pandas/core/ops/dispatch.py
+++ b/pandas/core/ops/dispatch.py
@@ -5,6 +5,8 @@
import numpy as np
+from pandas._typing import ArrayLike
+
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_extension_array_dtype,
@@ -15,7 +17,6 @@
)
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
-from pandas._typing import ArrayLike
from pandas.core.construction import array
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 3dfd5fed34741..37ec05c40940e 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -13,6 +13,7 @@
from pandas._libs import Timedelta, hashtable as libhashtable, lib
import pandas._libs.join as libjoin
+from pandas._typing import FrameOrSeries
from pandas.errors import MergeError
from pandas.util._decorators import Appender, Substitution
@@ -40,7 +41,6 @@
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas import Categorical, Index, MultiIndex
-from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algos
from pandas.core.arrays.categorical import _recode_for_categories
import pandas.core.common as com
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 0ef39a685f1ce..02f4eb47ba914 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -9,6 +9,7 @@
import pandas._libs.lib as lib
import pandas._libs.ops as libops
+from pandas._typing import ArrayLike, Dtype
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
@@ -32,7 +33,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas._typing import ArrayLike, Dtype
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
import pandas.core.common as com
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 8fa4b500b8c1e..2d43623cec92c 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -15,6 +15,7 @@
parse_time_string,
)
from pandas._libs.tslibs.strptime import array_strptime
+from pandas._typing import ArrayLike
from pandas.core.dtypes.common import (
ensure_object,
@@ -37,7 +38,6 @@
)
from pandas.core.dtypes.missing import notna
-from pandas._typing import ArrayLike
from pandas.core import algorithms
from pandas.core.algorithms import unique
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index c684acdbf917c..07b484321a665 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -11,6 +11,7 @@
import numpy as np
import pandas._libs.window.aggregations as window_aggregations
+from pandas._typing import Axis, FrameOrSeries, Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -35,7 +36,6 @@
ABCTimedeltaIndex,
)
-from pandas._typing import Axis, FrameOrSeries, Scalar
from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin
import pandas.core.common as com
from pandas.core.indexes.api import Index, ensure_index
diff --git a/pandas/io/common.py b/pandas/io/common.py
index d682604cf7aab..e165f8baef3e6 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -30,6 +30,7 @@
)
import zipfile
+from pandas._typing import FilePathOrBuffer
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.errors import ( # noqa
AbstractMethodError,
@@ -41,8 +42,6 @@
from pandas.core.dtypes.common import is_file_like
-from pandas._typing import FilePathOrBuffer
-
lzma = _import_lzma()
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 78054936f50f2..6b9943136664a 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -1,9 +1,9 @@
from typing import List
+from pandas._typing import FilePathOrBuffer, Scalar
from pandas.compat._optional import import_optional_dependency
import pandas as pd
-from pandas._typing import FilePathOrBuffer, Scalar
from pandas.io.excel._base import _BaseExcelReader
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index d278c6b3bbef2..7a264ed2b0850 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -2,9 +2,8 @@
import numpy as np
-from pandas.compat._optional import import_optional_dependency
-
from pandas._typing import FilePathOrBuffer, Scalar
+from pandas.compat._optional import import_optional_dependency
from pandas.io.excel._base import ExcelWriter, _BaseExcelReader
from pandas.io.excel._util import _validate_freeze_panes
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b0574925cf1b1..1b18e0fc3f0fa 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -38,6 +38,7 @@
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas._libs.tslibs.nattype import NaTType
+from pandas._typing import FilePathOrBuffer
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import (
@@ -63,7 +64,6 @@
)
from pandas.core.dtypes.missing import isna, notna
-from pandas._typing import FilePathOrBuffer
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.timedeltas import TimedeltaArray
from pandas.core.base import PandasObject
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 7444ebbaf27e3..15346e5798cd7 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -10,12 +10,12 @@
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
+from pandas._typing import JSONSerializable
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import ensure_str, is_period_dtype
from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
-from pandas._typing import JSONSerializable
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.reshape.concat import concat
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6699575e61656..17e275b84f451 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -20,6 +20,7 @@
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
+from pandas._typing import FilePathOrBuffer
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
@@ -49,7 +50,6 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
-from pandas._typing import FilePathOrBuffer
from pandas.core import algorithms
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 8e0ab27c1fa85..5f4636ac070bb 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -27,6 +27,7 @@
from pandas._libs import lib, writers as libwriters
from pandas._libs.tslibs import timezones
+from pandas._typing import ArrayLike, FrameOrSeries
from pandas.compat._optional import import_optional_dependency
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
@@ -57,7 +58,6 @@
concat,
isna,
)
-from pandas._typing import ArrayLike, FrameOrSeries
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
diff --git a/pandas/io/s3.py b/pandas/io/s3.py
index 7e0a37e8cba20..976c319f89d47 100644
--- a/pandas/io/s3.py
+++ b/pandas/io/s3.py
@@ -2,9 +2,8 @@
from typing import IO, Any, Optional, Tuple
from urllib.parse import urlparse as parse_url
-from pandas.compat._optional import import_optional_dependency
-
from pandas._typing import FilePathOrBuffer
+from pandas.compat._optional import import_optional_dependency
s3fs = import_optional_dependency(
"s3fs", extra="The s3fs package is required to handle s3 files."
diff --git a/setup.cfg b/setup.cfg
index c7d3394568f9c..8fb602188dad5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -104,7 +104,7 @@ directory = coverage_html_report
# To be kept consistent with "Import Formatting" section in contributing.rst
[isort]
known_pre_libs = pandas._config
-known_pre_core = pandas._libs,pandas.util._*,pandas.compat,pandas.errors
+known_pre_core = pandas._libs,pandas._typing,pandas.util._*,pandas.compat,pandas.errors
known_dtypes = pandas.core.dtypes
known_post_core = pandas.tseries,pandas.io,pandas.plotting
sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER
| - [x] closes #27534 | https://api.github.com/repos/pandas-dev/pandas/pulls/30423 | 2019-12-23T03:57:55Z | 2019-12-23T18:09:27Z | 2019-12-23T18:09:27Z | 2019-12-23T18:09:50Z |
REF: standardize test_constructor filenames | diff --git a/pandas/tests/base/test_construction.py b/pandas/tests/base/test_constructors.py
similarity index 100%
rename from pandas/tests/base/test_construction.py
rename to pandas/tests/base/test_constructors.py
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_constructors.py
similarity index 100%
rename from pandas/tests/indexes/datetimes/test_construction.py
rename to pandas/tests/indexes/datetimes/test_constructors.py
diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_constructors.py
similarity index 100%
rename from pandas/tests/indexes/interval/test_construction.py
rename to pandas/tests/indexes/interval/test_constructors.py
diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructors.py
similarity index 100%
rename from pandas/tests/indexes/multi/test_constructor.py
rename to pandas/tests/indexes/multi/test_constructors.py
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_constructors.py
similarity index 100%
rename from pandas/tests/indexes/period/test_construction.py
rename to pandas/tests/indexes/period/test_constructors.py
diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_constructors.py
similarity index 100%
rename from pandas/tests/indexes/timedeltas/test_construction.py
rename to pandas/tests/indexes/timedeltas/test_constructors.py
diff --git a/pandas/tests/scalar/timedelta/test_construction.py b/pandas/tests/scalar/timedelta/test_constructors.py
similarity index 100%
rename from pandas/tests/scalar/timedelta/test_construction.py
rename to pandas/tests/scalar/timedelta/test_constructors.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/30422 | 2019-12-23T02:34:38Z | 2019-12-23T13:58:43Z | 2019-12-23T13:58:43Z | 2019-12-23T15:47:36Z | |
fixed pandas/io/formats/printing.py formatting: replaced %/{}.format … | diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 8218799129952..2176487ff6a36 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -325,8 +325,8 @@ def format_object_summary(
if indent_for_name:
name_len = len(name)
- space1 = "\n%s" % (" " * (name_len + 1))
- space2 = "\n%s" % (" " * (name_len + 2))
+ space1 = f'\n{(" " * (name_len + 1))}'
+ space2 = f'\n{(" " * (name_len + 2))}'
else:
space1 = "\n"
space2 = "\n " # space for the opening '['
@@ -363,14 +363,14 @@ def best_len(values):
close = ", "
if n == 0:
- summary = "[]{}".format(close)
+ summary = f"[]{close}"
elif n == 1 and not line_break_each_value:
first = formatter(obj[0])
- summary = "[{}]{}".format(first, close)
+ summary = f"[{first}]{close}"
elif n == 2 and not line_break_each_value:
first = formatter(obj[0])
last = formatter(obj[-1])
- summary = "[{}, {}]{}".format(first, last, close)
+ summary = f"[{first}, {last}]{close}"
else:
if n > max_seq_items:
@@ -516,7 +516,7 @@ def format_object_attrs(
attrs: List[Tuple[str, Union[str, int]]] = []
if hasattr(obj, "dtype") and include_dtype:
# error: "Sequence[Any]" has no attribute "dtype"
- attrs.append(("dtype", "'{}'".format(obj.dtype))) # type: ignore
+ attrs.append(("dtype", f"'{obj.dtype}'")) # type: ignore
if getattr(obj, "name", None) is not None:
# error: "Sequence[Any]" has no attribute "name"
attrs.append(("name", default_pprint(obj.name))) # type: ignore
| Updated pandas/io/formats/printing.py formatting. Replaced % and {}.format() examples to python3 formatting. Ran tests and they passed.
ref [#29886](https://github.com/pandas-dev/pandas/issues/29886) and [#29547](https://github.com/pandas-dev/pandas/issues/29547). | https://api.github.com/repos/pandas-dev/pandas/pulls/30421 | 2019-12-23T02:23:35Z | 2019-12-23T08:05:22Z | 2019-12-23T08:05:22Z | 2019-12-23T11:47:26Z |
Fixing pandas/tests/indexes/datetimes/test_ops.py to utilize python3 … | diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index cd8d5bd7b260b..fb032947143d3 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -41,9 +41,9 @@ def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
- msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
- with pytest.raises(AttributeError, match=msg.format(op)):
+ msg = f"'Series' object has no attribute '{op}'"
+ with pytest.raises(AttributeError, match=msg):
getattr(self.dt_series, op)
# attribute access should still work!
| Updated pandas/tests/indexes/datetimes/test_ops.py to utilize python3 format strings. Ran the test and it seems good. I moved the msg into the for loop because it seems cleaner with the new format string logic.
ref [#29886](https://github.com/pandas-dev/pandas/issues/29886) and [#29547](https://github.com/pandas-dev/pandas/issues/29547). | https://api.github.com/repos/pandas-dev/pandas/pulls/30420 | 2019-12-23T01:41:24Z | 2019-12-23T08:03:55Z | 2019-12-23T08:03:54Z | 2019-12-23T11:46:59Z |
BUG: strengthen typing in get_c_string, fix StringHashTable segfault | diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd
index 51ec4ba43159c..0499eabf708af 100644
--- a/pandas/_libs/hashtable.pxd
+++ b/pandas/_libs/hashtable.pxd
@@ -36,8 +36,8 @@ cdef class PyObjectHashTable(HashTable):
cdef class StringHashTable(HashTable):
cdef kh_str_t *table
- cpdef get_item(self, object val)
- cpdef set_item(self, object key, Py_ssize_t val)
+ cpdef get_item(self, str val)
+ cpdef set_item(self, str key, Py_ssize_t val)
cdef struct Int64VectorData:
int64_t *data
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index b207fcb66948d..7d57c67e70b58 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -599,7 +599,7 @@ cdef class StringHashTable(HashTable):
sizeof(Py_ssize_t) + # vals
sizeof(uint32_t)) # flags
- cpdef get_item(self, object val):
+ cpdef get_item(self, str val):
cdef:
khiter_t k
const char *v
@@ -611,16 +611,16 @@ cdef class StringHashTable(HashTable):
else:
raise KeyError(val)
- cpdef set_item(self, object key, Py_ssize_t val):
+ cpdef set_item(self, str key, Py_ssize_t val):
cdef:
khiter_t k
int ret = 0
const char *v
- v = get_c_string(val)
+ v = get_c_string(key)
k = kh_put_str(self.table, v, &ret)
- self.table.keys[k] = key
+ self.table.keys[k] = v
if kh_exist_str(self.table, k):
self.table.vals[k] = val
else:
@@ -784,7 +784,7 @@ cdef class StringHashTable(HashTable):
labels[i] = na_sentinel
else:
# if ignore_na is False, we also stringify NaN/None/etc.
- v = get_c_string(val)
+ v = get_c_string(<str>val)
vecs[i] = v
# compute
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd
index 63cbd36f9cd1d..936532a81c6d6 100644
--- a/pandas/_libs/tslibs/util.pxd
+++ b/pandas/_libs/tslibs/util.pxd
@@ -218,7 +218,7 @@ cdef inline bint is_nan(object val):
return is_complex_object(val) and val != val
-cdef inline const char* get_c_string_buf_and_size(object py_string,
+cdef inline const char* get_c_string_buf_and_size(str py_string,
Py_ssize_t *length):
"""
Extract internal char* buffer of unicode or bytes object `py_string` with
@@ -231,7 +231,7 @@ cdef inline const char* get_c_string_buf_and_size(object py_string,
Parameters
----------
- py_string : object
+ py_string : str
length : Py_ssize_t*
Returns
@@ -241,12 +241,9 @@ cdef inline const char* get_c_string_buf_and_size(object py_string,
cdef:
const char *buf
- if PyUnicode_Check(py_string):
- buf = PyUnicode_AsUTF8AndSize(py_string, length)
- else:
- PyBytes_AsStringAndSize(py_string, <char**>&buf, length)
+ buf = PyUnicode_AsUTF8AndSize(py_string, length)
return buf
-cdef inline const char* get_c_string(object py_string):
+cdef inline const char* get_c_string(str py_string):
return get_c_string_buf_and_size(py_string, NULL)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index e0e4beffe113a..82f647c9385b2 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1402,6 +1402,19 @@ class TestGroupVarFloat32(GroupVarTestMixin):
class TestHashTable:
+ def test_string_hashtable_set_item_signature(self):
+ # GH#30419 fix typing in StringHashTable.set_item to prevent segfault
+ tbl = ht.StringHashTable()
+
+ tbl.set_item("key", 1)
+ assert tbl.get_item("key") == 1
+
+ with pytest.raises(TypeError, match="'key' has incorrect type"):
+ # key arg typed as string, not object
+ tbl.set_item(4, 6)
+ with pytest.raises(TypeError, match="'val' has incorrect type"):
+ tbl.get_item(4)
+
def test_lookup_nan(self, writable):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
# GH 21688 ensure we can deal with readonly memory views
| Start with util.pxd, we can tighten the arg from "object" to "str" and simplify it a bit. Tracking down the places where get_c_string is used, the main one is in StringHashTable, where in `set_item` it is currently used incorrectly. The test added by this PR segfaults in master.
@TomAugspurger it looks like StringHashTable has relatively light testing. Should we be using it for StringArray? | https://api.github.com/repos/pandas-dev/pandas/pulls/30419 | 2019-12-23T01:35:17Z | 2019-12-24T14:26:40Z | 2019-12-24T14:26:40Z | 2019-12-24T16:51:15Z |
Fixing pandas/tests/indexes/multi/test_analytics.py to utilize python… | diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index 36152bc4b60cd..a6d08c845d941 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -277,7 +277,7 @@ def test_map(idx):
def test_map_dictlike(idx, mapper):
if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)):
- pytest.skip("skipping tests for {}".format(type(idx)))
+ pytest.skip(f"skipping tests for {type(idx)}")
identity = mapper(idx.values, idx)
@@ -330,13 +330,13 @@ def test_numpy_ufuncs(idx, func):
if _np_version_under1p17:
expected_exception = AttributeError
- msg = "'tuple' object has no attribute '{}'".format(func.__name__)
+ msg = f"'tuple' object has no attribute '{func.__name__}'"
else:
expected_exception = TypeError
msg = (
"loop of ufunc does not support argument 0 of type tuple which"
- " has no callable {} method"
- ).format(func.__name__)
+ f" has no callable {func.__name__} method"
+ )
with pytest.raises(expected_exception, match=msg):
func(idx)
@@ -348,9 +348,9 @@ def test_numpy_ufuncs(idx, func):
)
def test_numpy_type_funcs(idx, func):
msg = (
- "ufunc '{}' not supported for the input types, and the inputs"
+ f"ufunc '{func.__name__}' not supported for the input types, and the inputs"
" could not be safely coerced to any supported types according to"
" the casting rule ''safe''"
- ).format(func.__name__)
+ )
with pytest.raises(TypeError, match=msg):
func(idx)
| Modified reflect python3 format strings on pandas/tests/indexes/multi/test_analytics.py. I ran the test and it passed like before. ref [#29886](https://github.com/pandas-dev/pandas/issues/29886) and [#29547](https://github.com/pandas-dev/pandas/issues/29547). | https://api.github.com/repos/pandas-dev/pandas/pulls/30418 | 2019-12-23T01:26:17Z | 2019-12-23T15:25:24Z | 2019-12-23T15:25:24Z | 2019-12-23T15:25:27Z |
Replaced xrange with range | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 559cedd62e7ce..94eaab0a5b4da 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -52,7 +52,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
black --version
MSG='Checking black formatting' ; echo $MSG
- black . --check
+ black . --check
RET=$(($RET + $?)) ; echo $MSG "DONE"
# `setup.cfg` contains the list of error codes that are being ignored in flake8
@@ -104,7 +104,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
isort --version-number
# Imports - Check formatting using isort see setup.cfg for settings
- MSG='Check import format using isort ' ; echo $MSG
+ MSG='Check import format using isort' ; echo $MSG
ISORT_CMD="isort --recursive --check-only pandas asv_bench"
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]}))
@@ -203,6 +203,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -R --include=*.{py,pyx} '\.__class__' pandas
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Check for use of xrange instead of range' ; echo $MSG
+ invgrep -R --include=*.{py,pyx} 'xrange' pandas
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
MSG='Check that no file in the repo contains trailing whitespaces' ; echo $MSG
INVGREP_APPEND=" <- trailing whitespaces found"
invgrep -RI --exclude=\*.{svg,c,cpp,html,js} --exclude-dir=env "\s$" *
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index e41914f3aa9ad..026bd7a44a509 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -159,7 +159,7 @@ cpdef assert_almost_equal(a, b,
raise_assert_detail(obj, f"{obj} length are different", na, nb, r)
- for i in xrange(len(a)):
+ for i in range(len(a)):
try:
assert_almost_equal(a[i], b[i],
check_less_precise=check_less_precise)
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 0762523d53428..6c8b654c1955c 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -277,21 +277,19 @@ def test_constructor_with_index(self):
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
- xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
- cat = Categorical(xrange(3))
+ cat = Categorical(range(3))
tm.assert_categorical_equal(cat, exp)
- # This uses xrange internally
MultiIndex.from_product([range(5), ["a", "b", "c"]])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
- cat = Categorical([0, 1, 2], categories=xrange(3))
+ cat = Categorical([0, 1, 2], categories=range(3))
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index b27e7c217c4c2..eb8febb10a646 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -585,10 +585,6 @@ def test_strange_column_corruption_issue(self):
df = DataFrame(index=[0, 1])
df[0] = np.nan
wasCol = {}
- # uncommenting these makes the results match
- # for col in xrange(100, 200):
- # wasCol[col] = 1
- # df[col] = np.nan
for i, dt in enumerate(df.index):
for col in range(100, 200):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 39cbe843d1f2b..bc26615d1aad5 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -178,7 +178,7 @@ def test_skiprows_int(self):
assert_framelist_equal(df1, df2)
- def test_skiprows_xrange(self):
+ def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, "Unit", skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30417 | 2019-12-22T23:57:35Z | 2019-12-23T08:27:45Z | 2019-12-23T08:27:45Z | 2019-12-23T08:43:49Z |
REF: method-specific test files for Series/DataFrame | diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py
new file mode 100644
index 0000000000000..9293855e79b1c
--- /dev/null
+++ b/pandas/tests/frame/methods/test_diff.py
@@ -0,0 +1,120 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series, Timestamp, date_range
+import pandas.util.testing as tm
+
+
+class TestDataFrameDiff:
+ def test_diff(self, datetime_frame):
+ the_diff = datetime_frame.diff(1)
+
+ tm.assert_series_equal(
+ the_diff["A"], datetime_frame["A"] - datetime_frame["A"].shift(1)
+ )
+
+ # int dtype
+ a = 10000000000000000
+ b = a + 1
+ s = Series([a, b])
+
+ rs = DataFrame({"s": s}).diff()
+ assert rs.s[1] == 1
+
+ # mixed numeric
+ tf = datetime_frame.astype("float32")
+ the_diff = tf.diff(1)
+ tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
+
+ # GH#10907
+ df = pd.DataFrame({"y": pd.Series([2]), "z": pd.Series([3])})
+ df.insert(0, "x", 1)
+ result = df.diff(axis=1)
+ expected = pd.DataFrame(
+ {"x": np.nan, "y": pd.Series(1), "z": pd.Series(1)}
+ ).astype("float64")
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("tz", [None, "UTC"])
+ def test_diff_datetime_axis0(self, tz):
+ # GH#18578
+ df = DataFrame(
+ {
+ 0: date_range("2010", freq="D", periods=2, tz=tz),
+ 1: date_range("2010", freq="D", periods=2, tz=tz),
+ }
+ )
+
+ result = df.diff(axis=0)
+ expected = DataFrame(
+ {
+ 0: pd.TimedeltaIndex(["NaT", "1 days"]),
+ 1: pd.TimedeltaIndex(["NaT", "1 days"]),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("tz", [None, "UTC"])
+ def test_diff_datetime_axis1(self, tz):
+ # GH#18578
+ df = DataFrame(
+ {
+ 0: date_range("2010", freq="D", periods=2, tz=tz),
+ 1: date_range("2010", freq="D", periods=2, tz=tz),
+ }
+ )
+ if tz is None:
+ result = df.diff(axis=1)
+ expected = DataFrame(
+ {
+ 0: pd.TimedeltaIndex(["NaT", "NaT"]),
+ 1: pd.TimedeltaIndex(["0 days", "0 days"]),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+ else:
+ with pytest.raises(NotImplementedError):
+ result = df.diff(axis=1)
+
+ def test_diff_timedelta(self):
+ # GH#4533
+ df = DataFrame(
+ dict(
+ time=[Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
+ value=[1.0, 2.0],
+ )
+ )
+
+ res = df.diff()
+ exp = DataFrame(
+ [[pd.NaT, np.nan], [pd.Timedelta("00:01:00"), 1]], columns=["time", "value"]
+ )
+ tm.assert_frame_equal(res, exp)
+
+ def test_diff_mixed_dtype(self):
+ df = DataFrame(np.random.randn(5, 3))
+ df["A"] = np.array([1, 2, 3, 4, 5], dtype=object)
+
+ result = df.diff()
+ assert result[0].dtype == np.float64
+
+ def test_diff_neg_n(self, datetime_frame):
+ rs = datetime_frame.diff(-1)
+ xp = datetime_frame - datetime_frame.shift(-1)
+ tm.assert_frame_equal(rs, xp)
+
+ def test_diff_float_n(self, datetime_frame):
+ rs = datetime_frame.diff(1.0)
+ xp = datetime_frame.diff(1)
+ tm.assert_frame_equal(rs, xp)
+
+ def test_diff_axis(self):
+ # GH#9727
+ df = DataFrame([[1.0, 2.0], [3.0, 4.0]])
+ tm.assert_frame_equal(
+ df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]])
+ )
+ tm.assert_frame_equal(
+ df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]])
+ )
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
new file mode 100644
index 0000000000000..7fb8fbbc95627
--- /dev/null
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -0,0 +1,187 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Index, Series, date_range, offsets
+import pandas.util.testing as tm
+
+
+class TestDataFrameShift:
+ def test_shift(self, datetime_frame, int_frame):
+ # naive shift
+ shiftedFrame = datetime_frame.shift(5)
+ tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
+
+ shiftedSeries = datetime_frame["A"].shift(5)
+ tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
+
+ shiftedFrame = datetime_frame.shift(-5)
+ tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
+
+ shiftedSeries = datetime_frame["A"].shift(-5)
+ tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
+
+ # shift by 0
+ unshifted = datetime_frame.shift(0)
+ tm.assert_frame_equal(unshifted, datetime_frame)
+
+ # shift by DateOffset
+ shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
+ assert len(shiftedFrame) == len(datetime_frame)
+
+ shiftedFrame2 = datetime_frame.shift(5, freq="B")
+ tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
+
+ d = datetime_frame.index[0]
+ shifted_d = d + offsets.BDay(5)
+ tm.assert_series_equal(
+ datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
+ )
+
+ # shift int frame
+ int_shifted = int_frame.shift(1) # noqa
+
+ # Shifting with PeriodIndex
+ ps = tm.makePeriodFrame()
+ shifted = ps.shift(1)
+ unshifted = shifted.shift(-1)
+ tm.assert_index_equal(shifted.index, ps.index)
+ tm.assert_index_equal(unshifted.index, ps.index)
+ tm.assert_numpy_array_equal(
+ unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
+ )
+
+ shifted2 = ps.shift(1, "B")
+ shifted3 = ps.shift(1, offsets.BDay())
+ tm.assert_frame_equal(shifted2, shifted3)
+ tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
+
+ msg = "does not match PeriodIndex freq"
+ with pytest.raises(ValueError, match=msg):
+ ps.shift(freq="D")
+
+ # shift other axis
+ # GH#6371
+ df = DataFrame(np.random.rand(10, 5))
+ expected = pd.concat(
+ [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
+ ignore_index=True,
+ axis=1,
+ )
+ result = df.shift(1, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ # shift named axis
+ df = DataFrame(np.random.rand(10, 5))
+ expected = pd.concat(
+ [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
+ ignore_index=True,
+ axis=1,
+ )
+ result = df.shift(1, axis="columns")
+ tm.assert_frame_equal(result, expected)
+
+ def test_shift_bool(self):
+ df = DataFrame({"high": [True, False], "low": [False, False]})
+ rs = df.shift(1)
+ xp = DataFrame(
+ np.array([[np.nan, np.nan], [True, False]], dtype=object),
+ columns=["high", "low"],
+ )
+ tm.assert_frame_equal(rs, xp)
+
+ def test_shift_categorical(self):
+ # GH#9416
+ s1 = pd.Series(["a", "b", "c"], dtype="category")
+ s2 = pd.Series(["A", "B", "C"], dtype="category")
+ df = DataFrame({"one": s1, "two": s2})
+ rs = df.shift(1)
+ xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
+ tm.assert_frame_equal(rs, xp)
+
+ def test_shift_fill_value(self):
+ # GH#24128
+ df = DataFrame(
+ [1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
+ )
+ exp = DataFrame(
+ [0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
+ )
+ result = df.shift(1, fill_value=0)
+ tm.assert_frame_equal(result, exp)
+
+ exp = DataFrame(
+ [0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
+ )
+ result = df.shift(2, fill_value=0)
+ tm.assert_frame_equal(result, exp)
+
+ def test_shift_empty(self):
+ # Regression test for GH#8019
+ df = DataFrame({"foo": []})
+ rs = df.shift(-1)
+
+ tm.assert_frame_equal(df, rs)
+
+ def test_shift_duplicate_columns(self):
+ # GH#9092; verify that position-based shifting works
+ # in the presence of duplicate columns
+ column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
+ data = np.random.randn(20, 5)
+
+ shifted = []
+ for columns in column_lists:
+ df = pd.DataFrame(data.copy(), columns=columns)
+ for s in range(5):
+ df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
+ df.columns = range(5)
+ shifted.append(df)
+
+ # sanity check the base case
+ nulls = shifted[0].isna().sum()
+ tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
+
+ # check all answers are the same
+ tm.assert_frame_equal(shifted[0], shifted[1])
+ tm.assert_frame_equal(shifted[0], shifted[2])
+
+ def test_tshift(self, datetime_frame):
+ # PeriodIndex
+ ps = tm.makePeriodFrame()
+ shifted = ps.tshift(1)
+ unshifted = shifted.tshift(-1)
+
+ tm.assert_frame_equal(unshifted, ps)
+
+ shifted2 = ps.tshift(freq="B")
+ tm.assert_frame_equal(shifted, shifted2)
+
+ shifted3 = ps.tshift(freq=offsets.BDay())
+ tm.assert_frame_equal(shifted, shifted3)
+
+ with pytest.raises(ValueError, match="does not match"):
+ ps.tshift(freq="M")
+
+ # DatetimeIndex
+ shifted = datetime_frame.tshift(1)
+ unshifted = shifted.tshift(-1)
+
+ tm.assert_frame_equal(datetime_frame, unshifted)
+
+ shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
+ tm.assert_frame_equal(shifted, shifted2)
+
+ inferred_ts = DataFrame(
+ datetime_frame.values,
+ Index(np.asarray(datetime_frame.index)),
+ columns=datetime_frame.columns,
+ )
+ shifted = inferred_ts.tshift(1)
+ unshifted = shifted.tshift(-1)
+ tm.assert_frame_equal(shifted, datetime_frame.tshift(1))
+ tm.assert_frame_equal(unshifted, inferred_ts)
+
+ no_freq = datetime_frame.iloc[[0, 5, 7], :]
+ msg = "Freq was not given and was not set in the index"
+ with pytest.raises(ValueError, match=msg):
+ no_freq.tshift()
diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py
new file mode 100644
index 0000000000000..556d86bed8f14
--- /dev/null
+++ b/pandas/tests/frame/methods/test_to_dict.py
@@ -0,0 +1,258 @@
+from collections import OrderedDict, defaultdict
+from datetime import datetime
+
+import numpy as np
+import pytest
+import pytz
+
+from pandas import DataFrame, Series, Timestamp
+import pandas.util.testing as tm
+
+
+class TestDataFrameToDict:
+ def test_to_dict_timestamp(self):
+
+ # GH#11247
+ # split/records producing np.datetime64 rather than Timestamps
+ # on datetime64[ns] dtypes only
+
+ tsmp = Timestamp("20130101")
+ test_data = DataFrame({"A": [tsmp, tsmp], "B": [tsmp, tsmp]})
+ test_data_mixed = DataFrame({"A": [tsmp, tsmp], "B": [1, 2]})
+
+ expected_records = [{"A": tsmp, "B": tsmp}, {"A": tsmp, "B": tsmp}]
+ expected_records_mixed = [{"A": tsmp, "B": 1}, {"A": tsmp, "B": 2}]
+
+ assert test_data.to_dict(orient="records") == expected_records
+ assert test_data_mixed.to_dict(orient="records") == expected_records_mixed
+
+ expected_series = {
+ "A": Series([tsmp, tsmp], name="A"),
+ "B": Series([tsmp, tsmp], name="B"),
+ }
+ expected_series_mixed = {
+ "A": Series([tsmp, tsmp], name="A"),
+ "B": Series([1, 2], name="B"),
+ }
+
+ tm.assert_dict_equal(test_data.to_dict(orient="series"), expected_series)
+ tm.assert_dict_equal(
+ test_data_mixed.to_dict(orient="series"), expected_series_mixed
+ )
+
+ expected_split = {
+ "index": [0, 1],
+ "data": [[tsmp, tsmp], [tsmp, tsmp]],
+ "columns": ["A", "B"],
+ }
+ expected_split_mixed = {
+ "index": [0, 1],
+ "data": [[tsmp, 1], [tsmp, 2]],
+ "columns": ["A", "B"],
+ }
+
+ tm.assert_dict_equal(test_data.to_dict(orient="split"), expected_split)
+ tm.assert_dict_equal(
+ test_data_mixed.to_dict(orient="split"), expected_split_mixed
+ )
+
+ def test_to_dict_index_not_unique_with_index_orient(self):
+ # GH#22801
+ # Data loss when indexes are not unique. Raise ValueError.
+ df = DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["A", "A"])
+ msg = "DataFrame index must be unique for orient='index'"
+ with pytest.raises(ValueError, match=msg):
+ df.to_dict(orient="index")
+
+ def test_to_dict_invalid_orient(self):
+ df = DataFrame({"A": [0, 1]})
+ msg = "orient 'xinvalid' not understood"
+ with pytest.raises(ValueError, match=msg):
+ df.to_dict(orient="xinvalid")
+
+ @pytest.mark.parametrize("mapping", [dict, defaultdict(list), OrderedDict])
+ def test_to_dict(self, mapping):
+ test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
+
+ # GH#16122
+ recons_data = DataFrame(test_data).to_dict(into=mapping)
+
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
+ assert v2 == recons_data[k][k2]
+
+ recons_data = DataFrame(test_data).to_dict("l", mapping)
+
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
+ assert v2 == recons_data[k][int(k2) - 1]
+
+ recons_data = DataFrame(test_data).to_dict("s", mapping)
+
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
+ assert v2 == recons_data[k][k2]
+
+ recons_data = DataFrame(test_data).to_dict("sp", mapping)
+ expected_split = {
+ "columns": ["A", "B"],
+ "index": ["1", "2", "3"],
+ "data": [[1.0, "1"], [2.0, "2"], [np.nan, "3"]],
+ }
+ tm.assert_dict_equal(recons_data, expected_split)
+
+ recons_data = DataFrame(test_data).to_dict("r", mapping)
+ expected_records = [
+ {"A": 1.0, "B": "1"},
+ {"A": 2.0, "B": "2"},
+ {"A": np.nan, "B": "3"},
+ ]
+ assert isinstance(recons_data, list)
+ assert len(recons_data) == 3
+ for l, r in zip(recons_data, expected_records):
+ tm.assert_dict_equal(l, r)
+
+ # GH#10844
+ recons_data = DataFrame(test_data).to_dict("i")
+
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
+ assert v2 == recons_data[k2][k]
+
+ df = DataFrame(test_data)
+ df["duped"] = df[df.columns[0]]
+ recons_data = df.to_dict("i")
+ comp_data = test_data.copy()
+ comp_data["duped"] = comp_data[df.columns[0]]
+ for k, v in comp_data.items():
+ for k2, v2 in v.items():
+ assert v2 == recons_data[k2][k]
+
+ @pytest.mark.parametrize("mapping", [list, defaultdict, []])
+ def test_to_dict_errors(self, mapping):
+ # GH#16122
+ df = DataFrame(np.random.randn(3, 3))
+ with pytest.raises(TypeError):
+ df.to_dict(into=mapping)
+
+ def test_to_dict_not_unique_warning(self):
+ # GH#16927: When converting to a dict, if a column has a non-unique name
+ # it will be dropped, throwing a warning.
+ df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"])
+ with tm.assert_produces_warning(UserWarning):
+ df.to_dict()
+
+ # orient - orient argument to to_dict function
+ # item_getter - function for extracting value from
+ # the resulting dict using column name and index
+ @pytest.mark.parametrize(
+ "orient,item_getter",
+ [
+ ("dict", lambda d, col, idx: d[col][idx]),
+ ("records", lambda d, col, idx: d[idx][col]),
+ ("list", lambda d, col, idx: d[col][idx]),
+ ("split", lambda d, col, idx: d["data"][idx][d["columns"].index(col)]),
+ ("index", lambda d, col, idx: d[idx][col]),
+ ],
+ )
+ def test_to_dict_box_scalars(self, orient, item_getter):
+ # GH#14216, GH#23753
+ # make sure that we are boxing properly
+ df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
+ result = df.to_dict(orient=orient)
+ assert isinstance(item_getter(result, "a", 0), int)
+ assert isinstance(item_getter(result, "b", 0), float)
+
+ def test_to_dict_tz(self):
+ # GH#18372 When converting to dict with orient='records' columns of
+ # datetime that are tz-aware were not converted to required arrays
+ data = [
+ (datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
+ (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc),),
+ ]
+ df = DataFrame(list(data), columns=["d"])
+
+ result = df.to_dict(orient="records")
+ expected = [
+ {"d": Timestamp("2017-11-18 21:53:00.219225+0000", tz=pytz.utc)},
+ {"d": Timestamp("2017-11-18 22:06:30.061810+0000", tz=pytz.utc)},
+ ]
+ tm.assert_dict_equal(result[0], expected[0])
+ tm.assert_dict_equal(result[1], expected[1])
+
+ @pytest.mark.parametrize(
+ "into, expected",
+ [
+ (
+ dict,
+ {
+ 0: {"int_col": 1, "float_col": 1.0},
+ 1: {"int_col": 2, "float_col": 2.0},
+ 2: {"int_col": 3, "float_col": 3.0},
+ },
+ ),
+ (
+ OrderedDict,
+ OrderedDict(
+ [
+ (0, {"int_col": 1, "float_col": 1.0}),
+ (1, {"int_col": 2, "float_col": 2.0}),
+ (2, {"int_col": 3, "float_col": 3.0}),
+ ]
+ ),
+ ),
+ (
+ defaultdict(dict),
+ defaultdict(
+ dict,
+ {
+ 0: {"int_col": 1, "float_col": 1.0},
+ 1: {"int_col": 2, "float_col": 2.0},
+ 2: {"int_col": 3, "float_col": 3.0},
+ },
+ ),
+ ),
+ ],
+ )
+ def test_to_dict_index_dtypes(self, into, expected):
+ # GH#18580
+ # When using to_dict(orient='index') on a dataframe with int
+ # and float columns only the int columns were cast to float
+
+ df = DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]})
+
+ result = df.to_dict(orient="index", into=into)
+ cols = ["int_col", "float_col"]
+ result = DataFrame.from_dict(result, orient="index")[cols]
+ expected = DataFrame.from_dict(expected, orient="index")[cols]
+ tm.assert_frame_equal(result, expected)
+
+ def test_to_dict_numeric_names(self):
+ # GH#24940
+ df = DataFrame({str(i): [i] for i in range(5)})
+ result = set(df.to_dict("records")[0].keys())
+ expected = set(df.columns)
+ assert result == expected
+
+ def test_to_dict_wide(self):
+ # GH#24939
+ df = DataFrame({("A_{:d}".format(i)): [i] for i in range(256)})
+ result = df.to_dict("records")[0]
+ expected = {"A_{:d}".format(i): i for i in range(256)}
+ assert result == expected
+
+ def test_to_dict_orient_dtype(self):
+ # GH#22620
+ # Input Data
+ input_data = {"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["X", "Y", "Z"]}
+ df = DataFrame(input_data)
+ # Expected Dtypes
+ expected = {"a": int, "b": float, "c": str}
+ # Extracting dtypes out of to_dict operation
+ for df_dict in df.to_dict("records"):
+ result = {
+ "a": type(df_dict["a"]),
+ "b": type(df_dict["b"]),
+ "c": type(df_dict["c"]),
+ }
+ assert result == expected
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/methods/test_to_records.py
similarity index 57%
rename from pandas/tests/frame/test_convert_to.py
rename to pandas/tests/frame/methods/test_to_records.py
index 63a98fda974a6..eb69e8b297a6a 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/methods/test_to_records.py
@@ -1,82 +1,13 @@
-from collections import OrderedDict, abc, defaultdict
-from datetime import datetime
+from collections import abc
import numpy as np
import pytest
-import pytz
-
-from pandas import (
- CategoricalDtype,
- DataFrame,
- MultiIndex,
- Series,
- Timestamp,
- date_range,
-)
-import pandas.util.testing as tm
-
-
-class TestDataFrameConvertTo:
- def test_to_dict_timestamp(self):
-
- # GH11247
- # split/records producing np.datetime64 rather than Timestamps
- # on datetime64[ns] dtypes only
-
- tsmp = Timestamp("20130101")
- test_data = DataFrame({"A": [tsmp, tsmp], "B": [tsmp, tsmp]})
- test_data_mixed = DataFrame({"A": [tsmp, tsmp], "B": [1, 2]})
-
- expected_records = [{"A": tsmp, "B": tsmp}, {"A": tsmp, "B": tsmp}]
- expected_records_mixed = [{"A": tsmp, "B": 1}, {"A": tsmp, "B": 2}]
-
- assert test_data.to_dict(orient="records") == expected_records
- assert test_data_mixed.to_dict(orient="records") == expected_records_mixed
-
- expected_series = {
- "A": Series([tsmp, tsmp], name="A"),
- "B": Series([tsmp, tsmp], name="B"),
- }
- expected_series_mixed = {
- "A": Series([tsmp, tsmp], name="A"),
- "B": Series([1, 2], name="B"),
- }
-
- tm.assert_dict_equal(test_data.to_dict(orient="series"), expected_series)
- tm.assert_dict_equal(
- test_data_mixed.to_dict(orient="series"), expected_series_mixed
- )
-
- expected_split = {
- "index": [0, 1],
- "data": [[tsmp, tsmp], [tsmp, tsmp]],
- "columns": ["A", "B"],
- }
- expected_split_mixed = {
- "index": [0, 1],
- "data": [[tsmp, 1], [tsmp, 2]],
- "columns": ["A", "B"],
- }
-
- tm.assert_dict_equal(test_data.to_dict(orient="split"), expected_split)
- tm.assert_dict_equal(
- test_data_mixed.to_dict(orient="split"), expected_split_mixed
- )
- def test_to_dict_index_not_unique_with_index_orient(self):
- # GH22801
- # Data loss when indexes are not unique. Raise ValueError.
- df = DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["A", "A"])
- msg = "DataFrame index must be unique for orient='index'"
- with pytest.raises(ValueError, match=msg):
- df.to_dict(orient="index")
+from pandas import CategoricalDtype, DataFrame, MultiIndex, Series, date_range
+import pandas.util.testing as tm
- def test_to_dict_invalid_orient(self):
- df = DataFrame({"A": [0, 1]})
- msg = "orient 'xinvalid' not understood"
- with pytest.raises(ValueError, match=msg):
- df.to_dict(orient="xinvalid")
+class TestDataFrameToRecords:
def test_to_records_dt64(self):
df = DataFrame(
[["one", "two", "three"], ["four", "five", "six"]],
@@ -88,7 +19,7 @@ def test_to_records_dt64(self):
assert expected == result
def test_to_records_with_multindex(self):
- # GH3189
+ # GH#3189
index = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
@@ -136,15 +67,15 @@ def test_to_records_index_name(self):
assert "level_0" in rs.dtype.fields
def test_to_records_with_unicode_index(self):
- # GH13172
+ # GH#13172
# unicode_literals conflict with to_records
result = DataFrame([{"a": "x", "b": "y"}]).set_index("a").to_records()
expected = np.rec.array([("x", "y")], dtype=[("a", "O"), ("b", "O")])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
- # xref issue: https://github.com/numpy/numpy/issues/2407
- # Issue #11879. to_records used to raise an exception when used
+ # xref GH#2407
+ # Issue GH#11879. to_records used to raise an exception when used
# with column names containing non-ascii characters in Python 2
result = DataFrame(data={"accented_name_é": [1.0]}).to_records()
@@ -157,8 +88,7 @@ def test_to_records_with_unicode_column_names(self):
tm.assert_almost_equal(result, expected)
def test_to_records_with_categorical(self):
-
- # GH8626
+ # GH#8626
# dict creation
df = DataFrame({"A": list("abc")}, dtype="category")
@@ -310,7 +240,7 @@ def test_to_records_with_categorical(self):
],
)
def test_to_records_dtype(self, kwargs, expected):
- # see gh-18146
+ # see GH#18146
df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
if not isinstance(expected, np.recarray):
@@ -383,12 +313,12 @@ def test_to_records_dtype(self, kwargs, expected):
],
)
def test_to_records_dtype_mi(self, df, kwargs, expected):
- # see gh-18146
+ # see GH#18146
result = df.to_records(**kwargs)
tm.assert_almost_equal(result, expected)
def test_to_records_dict_like(self):
- # see gh-18146
+ # see GH#18146
class DictLike:
def __init__(self, **kwargs):
self.d = kwargs.copy()
@@ -416,81 +346,9 @@ def keys(self):
)
tm.assert_almost_equal(result, expected)
- @pytest.mark.parametrize("mapping", [dict, defaultdict(list), OrderedDict])
- def test_to_dict(self, mapping):
- test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
-
- # GH16122
- recons_data = DataFrame(test_data).to_dict(into=mapping)
-
- for k, v in test_data.items():
- for k2, v2 in v.items():
- assert v2 == recons_data[k][k2]
-
- recons_data = DataFrame(test_data).to_dict("l", mapping)
-
- for k, v in test_data.items():
- for k2, v2 in v.items():
- assert v2 == recons_data[k][int(k2) - 1]
-
- recons_data = DataFrame(test_data).to_dict("s", mapping)
-
- for k, v in test_data.items():
- for k2, v2 in v.items():
- assert v2 == recons_data[k][k2]
-
- recons_data = DataFrame(test_data).to_dict("sp", mapping)
- expected_split = {
- "columns": ["A", "B"],
- "index": ["1", "2", "3"],
- "data": [[1.0, "1"], [2.0, "2"], [np.nan, "3"]],
- }
- tm.assert_dict_equal(recons_data, expected_split)
-
- recons_data = DataFrame(test_data).to_dict("r", mapping)
- expected_records = [
- {"A": 1.0, "B": "1"},
- {"A": 2.0, "B": "2"},
- {"A": np.nan, "B": "3"},
- ]
- assert isinstance(recons_data, list)
- assert len(recons_data) == 3
- for l, r in zip(recons_data, expected_records):
- tm.assert_dict_equal(l, r)
-
- # GH10844
- recons_data = DataFrame(test_data).to_dict("i")
-
- for k, v in test_data.items():
- for k2, v2 in v.items():
- assert v2 == recons_data[k2][k]
-
- df = DataFrame(test_data)
- df["duped"] = df[df.columns[0]]
- recons_data = df.to_dict("i")
- comp_data = test_data.copy()
- comp_data["duped"] = comp_data[df.columns[0]]
- for k, v in comp_data.items():
- for k2, v2 in v.items():
- assert v2 == recons_data[k2][k]
-
- @pytest.mark.parametrize("mapping", [list, defaultdict, []])
- def test_to_dict_errors(self, mapping):
- # GH16122
- df = DataFrame(np.random.randn(3, 3))
- with pytest.raises(TypeError):
- df.to_dict(into=mapping)
-
- def test_to_dict_not_unique_warning(self):
- # GH16927: When converting to a dict, if a column has a non-unique name
- # it will be dropped, throwing a warning.
- df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"])
- with tm.assert_produces_warning(UserWarning):
- df.to_dict()
-
@pytest.mark.parametrize("tz", ["UTC", "GMT", "US/Eastern"])
def test_to_records_datetimeindex_with_tz(self, tz):
- # GH13937
+ # GH#13937
dr = date_range("2016-01-01", periods=10, freq="S", tz=tz)
df = DataFrame({"datetime": dr}, index=dr)
@@ -500,118 +358,3 @@ def test_to_records_datetimeindex_with_tz(self, tz):
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
-
- # orient - orient argument to to_dict function
- # item_getter - function for extracting value from
- # the resulting dict using column name and index
- @pytest.mark.parametrize(
- "orient,item_getter",
- [
- ("dict", lambda d, col, idx: d[col][idx]),
- ("records", lambda d, col, idx: d[idx][col]),
- ("list", lambda d, col, idx: d[col][idx]),
- ("split", lambda d, col, idx: d["data"][idx][d["columns"].index(col)]),
- ("index", lambda d, col, idx: d[idx][col]),
- ],
- )
- def test_to_dict_box_scalars(self, orient, item_getter):
- # 14216, 23753
- # make sure that we are boxing properly
- df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
- result = df.to_dict(orient=orient)
- assert isinstance(item_getter(result, "a", 0), int)
- assert isinstance(item_getter(result, "b", 0), float)
-
- def test_frame_to_dict_tz(self):
- # GH18372 When converting to dict with orient='records' columns of
- # datetime that are tz-aware were not converted to required arrays
- data = [
- (datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
- (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc),),
- ]
- df = DataFrame(list(data), columns=["d"])
-
- result = df.to_dict(orient="records")
- expected = [
- {"d": Timestamp("2017-11-18 21:53:00.219225+0000", tz=pytz.utc)},
- {"d": Timestamp("2017-11-18 22:06:30.061810+0000", tz=pytz.utc)},
- ]
- tm.assert_dict_equal(result[0], expected[0])
- tm.assert_dict_equal(result[1], expected[1])
-
- @pytest.mark.parametrize(
- "into, expected",
- [
- (
- dict,
- {
- 0: {"int_col": 1, "float_col": 1.0},
- 1: {"int_col": 2, "float_col": 2.0},
- 2: {"int_col": 3, "float_col": 3.0},
- },
- ),
- (
- OrderedDict,
- OrderedDict(
- [
- (0, {"int_col": 1, "float_col": 1.0}),
- (1, {"int_col": 2, "float_col": 2.0}),
- (2, {"int_col": 3, "float_col": 3.0}),
- ]
- ),
- ),
- (
- defaultdict(dict),
- defaultdict(
- dict,
- {
- 0: {"int_col": 1, "float_col": 1.0},
- 1: {"int_col": 2, "float_col": 2.0},
- 2: {"int_col": 3, "float_col": 3.0},
- },
- ),
- ),
- ],
- )
- def test_to_dict_index_dtypes(self, into, expected):
- # GH 18580
- # When using to_dict(orient='index') on a dataframe with int
- # and float columns only the int columns were cast to float
-
- df = DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]})
-
- result = df.to_dict(orient="index", into=into)
- cols = ["int_col", "float_col"]
- result = DataFrame.from_dict(result, orient="index")[cols]
- expected = DataFrame.from_dict(expected, orient="index")[cols]
- tm.assert_frame_equal(result, expected)
-
- def test_to_dict_numeric_names(self):
- # https://github.com/pandas-dev/pandas/issues/24940
- df = DataFrame({str(i): [i] for i in range(5)})
- result = set(df.to_dict("records")[0].keys())
- expected = set(df.columns)
- assert result == expected
-
- def test_to_dict_wide(self):
- # https://github.com/pandas-dev/pandas/issues/24939
- df = DataFrame({("A_{:d}".format(i)): [i] for i in range(256)})
- result = df.to_dict("records")[0]
- expected = {"A_{:d}".format(i): i for i in range(256)}
- assert result == expected
-
- def test_to_dict_orient_dtype(self):
- # https://github.com/pandas-dev/pandas/issues/22620
- # Input Data
- input_data = {"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["X", "Y", "Z"]}
- df = DataFrame(input_data)
- # Expected Dtypes
- expected = {"a": int, "b": float, "c": str}
- # Extracting dtypes out of to_dict operation
- for df_dict in df.to_dict("records"):
- result = {
- "a": type(df_dict["a"]),
- "b": type(df_dict["b"]),
- "c": type(df_dict["c"]),
- }
- assert result == expected
diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py
new file mode 100644
index 0000000000000..a021a99a45a5c
--- /dev/null
+++ b/pandas/tests/frame/methods/test_truncate.py
@@ -0,0 +1,89 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas.util.testing as tm
+
+
+class TestDataFrameTruncate:
+ def test_truncate(self, datetime_frame):
+ ts = datetime_frame[::3]
+
+ start, end = datetime_frame.index[3], datetime_frame.index[6]
+
+ start_missing = datetime_frame.index[2]
+ end_missing = datetime_frame.index[7]
+
+ # neither specified
+ truncated = ts.truncate()
+ tm.assert_frame_equal(truncated, ts)
+
+ # both specified
+ expected = ts[1:3]
+
+ truncated = ts.truncate(start, end)
+ tm.assert_frame_equal(truncated, expected)
+
+ truncated = ts.truncate(start_missing, end_missing)
+ tm.assert_frame_equal(truncated, expected)
+
+ # start specified
+ expected = ts[1:]
+
+ truncated = ts.truncate(before=start)
+ tm.assert_frame_equal(truncated, expected)
+
+ truncated = ts.truncate(before=start_missing)
+ tm.assert_frame_equal(truncated, expected)
+
+ # end specified
+ expected = ts[:3]
+
+ truncated = ts.truncate(after=end)
+ tm.assert_frame_equal(truncated, expected)
+
+ truncated = ts.truncate(after=end_missing)
+ tm.assert_frame_equal(truncated, expected)
+
+ msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
+ with pytest.raises(ValueError, match=msg):
+ ts.truncate(
+ before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq
+ )
+
+ def test_truncate_copy(self, datetime_frame):
+ index = datetime_frame.index
+ truncated = datetime_frame.truncate(index[5], index[10])
+ truncated.values[:] = 5.0
+ assert not (datetime_frame.values[5:11] == 5).any()
+
+ def test_truncate_nonsortedindex(self):
+ # GH#17935
+
+ df = pd.DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0])
+ msg = "truncate requires a sorted index"
+ with pytest.raises(ValueError, match=msg):
+ df.truncate(before=3, after=9)
+
+ rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
+ ts = pd.DataFrame(
+ {"A": np.random.randn(len(rng)), "B": np.random.randn(len(rng))}, index=rng
+ )
+ msg = "truncate requires a sorted index"
+ with pytest.raises(ValueError, match=msg):
+ ts.sort_values("A", ascending=False).truncate(
+ before="2011-11", after="2011-12"
+ )
+
+ df = pd.DataFrame(
+ {
+ 3: np.random.randn(5),
+ 20: np.random.randn(5),
+ 2: np.random.randn(5),
+ 0: np.random.randn(5),
+ },
+ columns=[3, 20, 2, 0],
+ )
+ msg = "truncate requires a sorted index"
+ with pytest.raises(ValueError, match=msg):
+ df.truncate(before=2, after=20, axis=1)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index f6d2f58a63b53..b9df3ce305dbc 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -12,7 +12,6 @@
Index,
MultiIndex,
Series,
- Timestamp,
date_range,
period_range,
to_datetime,
@@ -28,118 +27,6 @@ def close_open_fixture(request):
class TestDataFrameTimeSeriesMethods:
- def test_diff(self, datetime_frame):
- the_diff = datetime_frame.diff(1)
-
- tm.assert_series_equal(
- the_diff["A"], datetime_frame["A"] - datetime_frame["A"].shift(1)
- )
-
- # int dtype
- a = 10000000000000000
- b = a + 1
- s = Series([a, b])
-
- rs = DataFrame({"s": s}).diff()
- assert rs.s[1] == 1
-
- # mixed numeric
- tf = datetime_frame.astype("float32")
- the_diff = tf.diff(1)
- tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
-
- # issue 10907
- df = pd.DataFrame({"y": pd.Series([2]), "z": pd.Series([3])})
- df.insert(0, "x", 1)
- result = df.diff(axis=1)
- expected = pd.DataFrame(
- {"x": np.nan, "y": pd.Series(1), "z": pd.Series(1)}
- ).astype("float64")
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("tz", [None, "UTC"])
- def test_diff_datetime_axis0(self, tz):
- # GH 18578
- df = DataFrame(
- {
- 0: date_range("2010", freq="D", periods=2, tz=tz),
- 1: date_range("2010", freq="D", periods=2, tz=tz),
- }
- )
-
- result = df.diff(axis=0)
- expected = DataFrame(
- {
- 0: pd.TimedeltaIndex(["NaT", "1 days"]),
- 1: pd.TimedeltaIndex(["NaT", "1 days"]),
- }
- )
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("tz", [None, "UTC"])
- def test_diff_datetime_axis1(self, tz):
- # GH 18578
- df = DataFrame(
- {
- 0: date_range("2010", freq="D", periods=2, tz=tz),
- 1: date_range("2010", freq="D", periods=2, tz=tz),
- }
- )
- if tz is None:
- result = df.diff(axis=1)
- expected = DataFrame(
- {
- 0: pd.TimedeltaIndex(["NaT", "NaT"]),
- 1: pd.TimedeltaIndex(["0 days", "0 days"]),
- }
- )
- tm.assert_frame_equal(result, expected)
- else:
- with pytest.raises(NotImplementedError):
- result = df.diff(axis=1)
-
- def test_diff_timedelta(self):
- # GH 4533
- df = DataFrame(
- dict(
- time=[Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
- value=[1.0, 2.0],
- )
- )
-
- res = df.diff()
- exp = DataFrame(
- [[pd.NaT, np.nan], [pd.Timedelta("00:01:00"), 1]], columns=["time", "value"]
- )
- tm.assert_frame_equal(res, exp)
-
- def test_diff_mixed_dtype(self):
- df = DataFrame(np.random.randn(5, 3))
- df["A"] = np.array([1, 2, 3, 4, 5], dtype=object)
-
- result = df.diff()
- assert result[0].dtype == np.float64
-
- def test_diff_neg_n(self, datetime_frame):
- rs = datetime_frame.diff(-1)
- xp = datetime_frame - datetime_frame.shift(-1)
- tm.assert_frame_equal(rs, xp)
-
- def test_diff_float_n(self, datetime_frame):
- rs = datetime_frame.diff(1.0)
- xp = datetime_frame.diff(1)
- tm.assert_frame_equal(rs, xp)
-
- def test_diff_axis(self):
- # GH 9727
- df = DataFrame([[1.0, 2.0], [3.0, 4.0]])
- tm.assert_frame_equal(
- df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]])
- )
- tm.assert_frame_equal(
- df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]])
- )
-
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
@@ -249,267 +136,6 @@ def test_frame_append_datetime64_col_other_units(self):
assert (tmp["dates"].values == ex_vals).all()
- def test_shift(self, datetime_frame, int_frame):
- # naive shift
- shiftedFrame = datetime_frame.shift(5)
- tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
-
- shiftedSeries = datetime_frame["A"].shift(5)
- tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
-
- shiftedFrame = datetime_frame.shift(-5)
- tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
-
- shiftedSeries = datetime_frame["A"].shift(-5)
- tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
-
- # shift by 0
- unshifted = datetime_frame.shift(0)
- tm.assert_frame_equal(unshifted, datetime_frame)
-
- # shift by DateOffset
- shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
- assert len(shiftedFrame) == len(datetime_frame)
-
- shiftedFrame2 = datetime_frame.shift(5, freq="B")
- tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
-
- d = datetime_frame.index[0]
- shifted_d = d + offsets.BDay(5)
- tm.assert_series_equal(
- datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
- )
-
- # shift int frame
- int_shifted = int_frame.shift(1) # noqa
-
- # Shifting with PeriodIndex
- ps = tm.makePeriodFrame()
- shifted = ps.shift(1)
- unshifted = shifted.shift(-1)
- tm.assert_index_equal(shifted.index, ps.index)
- tm.assert_index_equal(unshifted.index, ps.index)
- tm.assert_numpy_array_equal(
- unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
- )
-
- shifted2 = ps.shift(1, "B")
- shifted3 = ps.shift(1, offsets.BDay())
- tm.assert_frame_equal(shifted2, shifted3)
- tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
-
- msg = "does not match PeriodIndex freq"
- with pytest.raises(ValueError, match=msg):
- ps.shift(freq="D")
-
- # shift other axis
- # GH 6371
- df = DataFrame(np.random.rand(10, 5))
- expected = pd.concat(
- [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
- ignore_index=True,
- axis=1,
- )
- result = df.shift(1, axis=1)
- tm.assert_frame_equal(result, expected)
-
- # shift named axis
- df = DataFrame(np.random.rand(10, 5))
- expected = pd.concat(
- [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
- ignore_index=True,
- axis=1,
- )
- result = df.shift(1, axis="columns")
- tm.assert_frame_equal(result, expected)
-
- def test_shift_bool(self):
- df = DataFrame({"high": [True, False], "low": [False, False]})
- rs = df.shift(1)
- xp = DataFrame(
- np.array([[np.nan, np.nan], [True, False]], dtype=object),
- columns=["high", "low"],
- )
- tm.assert_frame_equal(rs, xp)
-
- def test_shift_categorical(self):
- # GH 9416
- s1 = pd.Series(["a", "b", "c"], dtype="category")
- s2 = pd.Series(["A", "B", "C"], dtype="category")
- df = DataFrame({"one": s1, "two": s2})
- rs = df.shift(1)
- xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
- tm.assert_frame_equal(rs, xp)
-
- def test_shift_fill_value(self):
- # GH #24128
- df = DataFrame(
- [1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
- )
- exp = DataFrame(
- [0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
- )
- result = df.shift(1, fill_value=0)
- tm.assert_frame_equal(result, exp)
-
- exp = DataFrame(
- [0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
- )
- result = df.shift(2, fill_value=0)
- tm.assert_frame_equal(result, exp)
-
- def test_shift_empty(self):
- # Regression test for #8019
- df = DataFrame({"foo": []})
- rs = df.shift(-1)
-
- tm.assert_frame_equal(df, rs)
-
- def test_shift_duplicate_columns(self):
- # GH 9092; verify that position-based shifting works
- # in the presence of duplicate columns
- column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
- data = np.random.randn(20, 5)
-
- shifted = []
- for columns in column_lists:
- df = pd.DataFrame(data.copy(), columns=columns)
- for s in range(5):
- df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
- df.columns = range(5)
- shifted.append(df)
-
- # sanity check the base case
- nulls = shifted[0].isna().sum()
- tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
-
- # check all answers are the same
- tm.assert_frame_equal(shifted[0], shifted[1])
- tm.assert_frame_equal(shifted[0], shifted[2])
-
- def test_tshift(self, datetime_frame):
- # PeriodIndex
- ps = tm.makePeriodFrame()
- shifted = ps.tshift(1)
- unshifted = shifted.tshift(-1)
-
- tm.assert_frame_equal(unshifted, ps)
-
- shifted2 = ps.tshift(freq="B")
- tm.assert_frame_equal(shifted, shifted2)
-
- shifted3 = ps.tshift(freq=offsets.BDay())
- tm.assert_frame_equal(shifted, shifted3)
-
- with pytest.raises(ValueError, match="does not match"):
- ps.tshift(freq="M")
-
- # DatetimeIndex
- shifted = datetime_frame.tshift(1)
- unshifted = shifted.tshift(-1)
-
- tm.assert_frame_equal(datetime_frame, unshifted)
-
- shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
- tm.assert_frame_equal(shifted, shifted2)
-
- inferred_ts = DataFrame(
- datetime_frame.values,
- Index(np.asarray(datetime_frame.index)),
- columns=datetime_frame.columns,
- )
- shifted = inferred_ts.tshift(1)
- unshifted = shifted.tshift(-1)
- tm.assert_frame_equal(shifted, datetime_frame.tshift(1))
- tm.assert_frame_equal(unshifted, inferred_ts)
-
- no_freq = datetime_frame.iloc[[0, 5, 7], :]
- msg = "Freq was not given and was not set in the index"
- with pytest.raises(ValueError, match=msg):
- no_freq.tshift()
-
- def test_truncate(self, datetime_frame):
- ts = datetime_frame[::3]
-
- start, end = datetime_frame.index[3], datetime_frame.index[6]
-
- start_missing = datetime_frame.index[2]
- end_missing = datetime_frame.index[7]
-
- # neither specified
- truncated = ts.truncate()
- tm.assert_frame_equal(truncated, ts)
-
- # both specified
- expected = ts[1:3]
-
- truncated = ts.truncate(start, end)
- tm.assert_frame_equal(truncated, expected)
-
- truncated = ts.truncate(start_missing, end_missing)
- tm.assert_frame_equal(truncated, expected)
-
- # start specified
- expected = ts[1:]
-
- truncated = ts.truncate(before=start)
- tm.assert_frame_equal(truncated, expected)
-
- truncated = ts.truncate(before=start_missing)
- tm.assert_frame_equal(truncated, expected)
-
- # end specified
- expected = ts[:3]
-
- truncated = ts.truncate(after=end)
- tm.assert_frame_equal(truncated, expected)
-
- truncated = ts.truncate(after=end_missing)
- tm.assert_frame_equal(truncated, expected)
-
- msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
- with pytest.raises(ValueError, match=msg):
- ts.truncate(
- before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq
- )
-
- def test_truncate_copy(self, datetime_frame):
- index = datetime_frame.index
- truncated = datetime_frame.truncate(index[5], index[10])
- truncated.values[:] = 5.0
- assert not (datetime_frame.values[5:11] == 5).any()
-
- def test_truncate_nonsortedindex(self):
- # GH 17935
-
- df = pd.DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0])
- msg = "truncate requires a sorted index"
- with pytest.raises(ValueError, match=msg):
- df.truncate(before=3, after=9)
-
- rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
- ts = pd.DataFrame(
- {"A": np.random.randn(len(rng)), "B": np.random.randn(len(rng))}, index=rng
- )
- msg = "truncate requires a sorted index"
- with pytest.raises(ValueError, match=msg):
- ts.sort_values("A", ascending=False).truncate(
- before="2011-11", after="2011-12"
- )
-
- df = pd.DataFrame(
- {
- 3: np.random.randn(5),
- 20: np.random.randn(5),
- 2: np.random.randn(5),
- 0: np.random.randn(5),
- },
- columns=[3, 20, 2, 0],
- )
- msg = "truncate requires a sorted index"
- with pytest.raises(ValueError, match=msg):
- df.truncate(before=2, after=20, axis=1)
-
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
diff --git a/pandas/tests/series/methods/test_diff.py b/pandas/tests/series/methods/test_diff.py
new file mode 100644
index 0000000000000..cf24c012ef349
--- /dev/null
+++ b/pandas/tests/series/methods/test_diff.py
@@ -0,0 +1,3 @@
+class TestDiff:
+ # TODO: maybe we should have tests for this?
+ pass
diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py
new file mode 100644
index 0000000000000..ad75012d8e621
--- /dev/null
+++ b/pandas/tests/series/methods/test_shift.py
@@ -0,0 +1,222 @@
+import numpy as np
+import pytest
+
+from pandas.errors import NullFrequencyError
+
+import pandas as pd
+from pandas import (
+ DatetimeIndex,
+ Index,
+ NaT,
+ Series,
+ TimedeltaIndex,
+ date_range,
+ offsets,
+)
+import pandas.util.testing as tm
+
+from pandas.tseries.offsets import BDay
+
+
+class TestShift:
+ def test_shift(self, datetime_series):
+ shifted = datetime_series.shift(1)
+ unshifted = shifted.shift(-1)
+
+ tm.assert_index_equal(shifted.index, datetime_series.index)
+ tm.assert_index_equal(unshifted.index, datetime_series.index)
+ tm.assert_numpy_array_equal(
+ unshifted.dropna().values, datetime_series.values[:-1]
+ )
+
+ offset = BDay()
+ shifted = datetime_series.shift(1, freq=offset)
+ unshifted = shifted.shift(-1, freq=offset)
+
+ tm.assert_series_equal(unshifted, datetime_series)
+
+ unshifted = datetime_series.shift(0, freq=offset)
+ tm.assert_series_equal(unshifted, datetime_series)
+
+ shifted = datetime_series.shift(1, freq="B")
+ unshifted = shifted.shift(-1, freq="B")
+
+ tm.assert_series_equal(unshifted, datetime_series)
+
+ # corner case
+ unshifted = datetime_series.shift(0)
+ tm.assert_series_equal(unshifted, datetime_series)
+
+ # Shifting with PeriodIndex
+ ps = tm.makePeriodSeries()
+ shifted = ps.shift(1)
+ unshifted = shifted.shift(-1)
+ tm.assert_index_equal(shifted.index, ps.index)
+ tm.assert_index_equal(unshifted.index, ps.index)
+ tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
+
+ shifted2 = ps.shift(1, "B")
+ shifted3 = ps.shift(1, BDay())
+ tm.assert_series_equal(shifted2, shifted3)
+ tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
+
+ msg = "Given freq D does not match PeriodIndex freq B"
+ with pytest.raises(ValueError, match=msg):
+ ps.shift(freq="D")
+
+ # legacy support
+ shifted4 = ps.shift(1, freq="B")
+ tm.assert_series_equal(shifted2, shifted4)
+
+ shifted5 = ps.shift(1, freq=BDay())
+ tm.assert_series_equal(shifted5, shifted4)
+
+ # 32-bit taking
+ # GH#8129
+ index = date_range("2000-01-01", periods=5)
+ for dtype in ["int32", "int64"]:
+ s1 = Series(np.arange(5, dtype=dtype), index=index)
+ p = s1.iloc[1]
+ result = s1.shift(periods=p)
+ expected = Series([np.nan, 0, 1, 2, 3], index=index)
+ tm.assert_series_equal(result, expected)
+
+ # GH#8260
+ # with tz
+ s = Series(
+ date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
+ )
+ result = s - s.shift()
+
+ exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
+ tm.assert_series_equal(result, exp)
+
+ # incompat tz
+ s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
+ msg = "DatetimeArray subtraction must have the same timezones or no timezones"
+ with pytest.raises(TypeError, match=msg):
+ s - s2
+
+ def test_shift2(self):
+ ts = Series(
+ np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
+ )
+
+ result = ts.shift(1, freq="5T")
+ exp_index = ts.index.shift(1, freq="5T")
+ tm.assert_index_equal(result.index, exp_index)
+
+ # GH#1063, multiple of same base
+ result = ts.shift(1, freq="4H")
+ exp_index = ts.index + offsets.Hour(4)
+ tm.assert_index_equal(result.index, exp_index)
+
+ idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
+ msg = "Cannot shift with no freq"
+ with pytest.raises(NullFrequencyError, match=msg):
+ idx.shift(1)
+
+ def test_shift_fill_value(self):
+ # GH#24128
+ ts = Series(
+ [1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
+ )
+
+ exp = Series(
+ [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
+ )
+ # check that fill value works
+ result = ts.shift(1, fill_value=0.0)
+ tm.assert_series_equal(result, exp)
+
+ exp = Series(
+ [0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
+ )
+ result = ts.shift(2, fill_value=0.0)
+ tm.assert_series_equal(result, exp)
+
+ ts = pd.Series([1, 2, 3])
+ res = ts.shift(2, fill_value=0)
+ assert res.dtype == ts.dtype
+
+ def test_shift_categorical_fill_value(self):
+ ts = pd.Series(["a", "b", "c", "d"], dtype="category")
+ res = ts.shift(1, fill_value="a")
+ expected = pd.Series(
+ pd.Categorical(
+ ["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
+ )
+ )
+ tm.assert_equal(res, expected)
+
+ # check for incorrect fill_value
+ msg = "'fill_value=f' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
+ ts.shift(1, fill_value="f")
+
+ def test_shift_dst(self):
+ # GH#13926
+ dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
+ s = Series(dates)
+
+ res = s.shift(0)
+ tm.assert_series_equal(res, s)
+ assert res.dtype == "datetime64[ns, US/Eastern]"
+
+ res = s.shift(1)
+ exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
+ exp = Series(exp_vals)
+ tm.assert_series_equal(res, exp)
+ assert res.dtype == "datetime64[ns, US/Eastern]"
+
+ res = s.shift(-2)
+ exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
+ exp = Series(exp_vals)
+ tm.assert_series_equal(res, exp)
+ assert res.dtype == "datetime64[ns, US/Eastern]"
+
+ for ex in [10, -10, 20, -20]:
+ res = s.shift(ex)
+ exp = Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
+ tm.assert_series_equal(res, exp)
+ assert res.dtype == "datetime64[ns, US/Eastern]"
+
+ def test_tshift(self, datetime_series):
+ # PeriodIndex
+ ps = tm.makePeriodSeries()
+ shifted = ps.tshift(1)
+ unshifted = shifted.tshift(-1)
+
+ tm.assert_series_equal(unshifted, ps)
+
+ shifted2 = ps.tshift(freq="B")
+ tm.assert_series_equal(shifted, shifted2)
+
+ shifted3 = ps.tshift(freq=BDay())
+ tm.assert_series_equal(shifted, shifted3)
+
+ msg = "Given freq M does not match PeriodIndex freq B"
+ with pytest.raises(ValueError, match=msg):
+ ps.tshift(freq="M")
+
+ # DatetimeIndex
+ shifted = datetime_series.tshift(1)
+ unshifted = shifted.tshift(-1)
+
+ tm.assert_series_equal(datetime_series, unshifted)
+
+ shifted2 = datetime_series.tshift(freq=datetime_series.index.freq)
+ tm.assert_series_equal(shifted, shifted2)
+
+ inferred_ts = Series(
+ datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
+ )
+ shifted = inferred_ts.tshift(1)
+ unshifted = shifted.tshift(-1)
+ tm.assert_series_equal(shifted, datetime_series.tshift(1))
+ tm.assert_series_equal(unshifted, inferred_ts)
+
+ no_freq = datetime_series[[0, 5, 7]]
+ msg = "Freq was not given and was not set in the index"
+ with pytest.raises(ValueError, match=msg):
+ no_freq.tshift()
diff --git a/pandas/tests/series/methods/test_to_dict.py b/pandas/tests/series/methods/test_to_dict.py
new file mode 100644
index 0000000000000..0f1359f99e594
--- /dev/null
+++ b/pandas/tests/series/methods/test_to_dict.py
@@ -0,0 +1,20 @@
+import collections
+
+import pytest
+
+from pandas import Series
+import pandas.util.testing as tm
+
+
+class TestSeriesToDict:
+ @pytest.mark.parametrize(
+ "mapping", (dict, collections.defaultdict(list), collections.OrderedDict)
+ )
+ def test_to_dict(self, mapping, datetime_series):
+ # GH#16122
+ tm.assert_series_equal(
+ Series(datetime_series.to_dict(mapping), name="ts"), datetime_series
+ )
+ from_method = Series(datetime_series.to_dict(collections.Counter))
+ from_constructor = Series(collections.Counter(datetime_series.items()))
+ tm.assert_series_equal(from_method, from_constructor)
diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
new file mode 100644
index 0000000000000..b2bf5e854fbcc
--- /dev/null
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -0,0 +1,78 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas.util.testing as tm
+
+from pandas.tseries.offsets import BDay
+
+
+class TestTruncate:
+ def test_truncate(self, datetime_series):
+ offset = BDay()
+
+ ts = datetime_series[::3]
+
+ start, end = datetime_series.index[3], datetime_series.index[6]
+ start_missing, end_missing = datetime_series.index[2], datetime_series.index[7]
+
+ # neither specified
+ truncated = ts.truncate()
+ tm.assert_series_equal(truncated, ts)
+
+ # both specified
+ expected = ts[1:3]
+
+ truncated = ts.truncate(start, end)
+ tm.assert_series_equal(truncated, expected)
+
+ truncated = ts.truncate(start_missing, end_missing)
+ tm.assert_series_equal(truncated, expected)
+
+ # start specified
+ expected = ts[1:]
+
+ truncated = ts.truncate(before=start)
+ tm.assert_series_equal(truncated, expected)
+
+ truncated = ts.truncate(before=start_missing)
+ tm.assert_series_equal(truncated, expected)
+
+ # end specified
+ expected = ts[:3]
+
+ truncated = ts.truncate(after=end)
+ tm.assert_series_equal(truncated, expected)
+
+ truncated = ts.truncate(after=end_missing)
+ tm.assert_series_equal(truncated, expected)
+
+ # corner case, empty series returned
+ truncated = ts.truncate(after=datetime_series.index[0] - offset)
+ assert len(truncated) == 0
+
+ truncated = ts.truncate(before=datetime_series.index[-1] + offset)
+ assert len(truncated) == 0
+
+ msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00"
+ with pytest.raises(ValueError, match=msg):
+ ts.truncate(
+ before=datetime_series.index[-1] + offset,
+ after=datetime_series.index[0] - offset,
+ )
+
+ def test_truncate_nonsortedindex(self):
+ # GH#17935
+
+ s = pd.Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
+ msg = "truncate requires a sorted index"
+
+ with pytest.raises(ValueError, match=msg):
+ s.truncate(before=3, after=9)
+
+ rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
+ ts = pd.Series(np.random.randn(len(rng)), index=rng)
+ msg = "truncate requires a sorted index"
+
+ with pytest.raises(ValueError, match=msg):
+ ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12")
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index b48c79000c98d..f53081ac53b01 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -1,4 +1,3 @@
-import collections
from datetime import datetime
from io import StringIO
@@ -239,15 +238,3 @@ class SubclassedFrame(DataFrame):
assert isinstance(result, SubclassedFrame)
expected = SubclassedFrame({"X": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize(
- "mapping", (dict, collections.defaultdict(list), collections.OrderedDict)
- )
- def test_to_dict(self, mapping, datetime_series):
- # GH16122
- tm.assert_series_equal(
- Series(datetime_series.to_dict(mapping), name="ts"), datetime_series
- )
- from_method = Series(datetime_series.to_dict(collections.Counter))
- from_constructor = Series(collections.Counter(datetime_series.items()))
- tm.assert_series_equal(from_method, from_constructor)
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 6d00b9f2b09df..b9bd7744d3f9c 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -7,24 +7,20 @@
from pandas._libs.tslib import iNaT
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-from pandas.errors import NullFrequencyError
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
- Index,
+ DatetimeIndex,
NaT,
Series,
Timestamp,
concat,
date_range,
- offsets,
timedelta_range,
to_datetime,
)
-from pandas.core.indexes.datetimes import DatetimeIndex
-from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd
@@ -42,277 +38,6 @@ def assert_range_equal(left, right):
class TestTimeSeries:
- def test_shift(self, datetime_series):
- shifted = datetime_series.shift(1)
- unshifted = shifted.shift(-1)
-
- tm.assert_index_equal(shifted.index, datetime_series.index)
- tm.assert_index_equal(unshifted.index, datetime_series.index)
- tm.assert_numpy_array_equal(
- unshifted.dropna().values, datetime_series.values[:-1]
- )
-
- offset = BDay()
- shifted = datetime_series.shift(1, freq=offset)
- unshifted = shifted.shift(-1, freq=offset)
-
- tm.assert_series_equal(unshifted, datetime_series)
-
- unshifted = datetime_series.shift(0, freq=offset)
- tm.assert_series_equal(unshifted, datetime_series)
-
- shifted = datetime_series.shift(1, freq="B")
- unshifted = shifted.shift(-1, freq="B")
-
- tm.assert_series_equal(unshifted, datetime_series)
-
- # corner case
- unshifted = datetime_series.shift(0)
- tm.assert_series_equal(unshifted, datetime_series)
-
- # Shifting with PeriodIndex
- ps = tm.makePeriodSeries()
- shifted = ps.shift(1)
- unshifted = shifted.shift(-1)
- tm.assert_index_equal(shifted.index, ps.index)
- tm.assert_index_equal(unshifted.index, ps.index)
- tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
-
- shifted2 = ps.shift(1, "B")
- shifted3 = ps.shift(1, BDay())
- tm.assert_series_equal(shifted2, shifted3)
- tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
-
- msg = "Given freq D does not match PeriodIndex freq B"
- with pytest.raises(ValueError, match=msg):
- ps.shift(freq="D")
-
- # legacy support
- shifted4 = ps.shift(1, freq="B")
- tm.assert_series_equal(shifted2, shifted4)
-
- shifted5 = ps.shift(1, freq=BDay())
- tm.assert_series_equal(shifted5, shifted4)
-
- # 32-bit taking
- # GH 8129
- index = date_range("2000-01-01", periods=5)
- for dtype in ["int32", "int64"]:
- s1 = Series(np.arange(5, dtype=dtype), index=index)
- p = s1.iloc[1]
- result = s1.shift(periods=p)
- expected = Series([np.nan, 0, 1, 2, 3], index=index)
- tm.assert_series_equal(result, expected)
-
- # xref 8260
- # with tz
- s = Series(
- date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
- )
- result = s - s.shift()
-
- exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
- tm.assert_series_equal(result, exp)
-
- # incompat tz
- s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
- msg = "DatetimeArray subtraction must have the same timezones or no timezones"
- with pytest.raises(TypeError, match=msg):
- s - s2
-
- def test_shift2(self):
- ts = Series(
- np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
- )
-
- result = ts.shift(1, freq="5T")
- exp_index = ts.index.shift(1, freq="5T")
- tm.assert_index_equal(result.index, exp_index)
-
- # GH #1063, multiple of same base
- result = ts.shift(1, freq="4H")
- exp_index = ts.index + offsets.Hour(4)
- tm.assert_index_equal(result.index, exp_index)
-
- idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
- msg = "Cannot shift with no freq"
- with pytest.raises(NullFrequencyError, match=msg):
- idx.shift(1)
-
- def test_shift_fill_value(self):
- # GH #24128
- ts = Series(
- [1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
- )
-
- exp = Series(
- [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
- )
- # check that fill value works
- result = ts.shift(1, fill_value=0.0)
- tm.assert_series_equal(result, exp)
-
- exp = Series(
- [0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
- )
- result = ts.shift(2, fill_value=0.0)
- tm.assert_series_equal(result, exp)
-
- ts = pd.Series([1, 2, 3])
- res = ts.shift(2, fill_value=0)
- assert res.dtype == ts.dtype
-
- def test_categorical_shift_fill_value(self):
- ts = pd.Series(["a", "b", "c", "d"], dtype="category")
- res = ts.shift(1, fill_value="a")
- expected = pd.Series(
- pd.Categorical(
- ["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
- )
- )
- tm.assert_equal(res, expected)
-
- # check for incorrect fill_value
- msg = "'fill_value=f' is not present in this Categorical's categories"
- with pytest.raises(ValueError, match=msg):
- ts.shift(1, fill_value="f")
-
- def test_shift_dst(self):
- # GH 13926
- dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
- s = Series(dates)
-
- res = s.shift(0)
- tm.assert_series_equal(res, s)
- assert res.dtype == "datetime64[ns, US/Eastern]"
-
- res = s.shift(1)
- exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
- exp = Series(exp_vals)
- tm.assert_series_equal(res, exp)
- assert res.dtype == "datetime64[ns, US/Eastern]"
-
- res = s.shift(-2)
- exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
- exp = Series(exp_vals)
- tm.assert_series_equal(res, exp)
- assert res.dtype == "datetime64[ns, US/Eastern]"
-
- for ex in [10, -10, 20, -20]:
- res = s.shift(ex)
- exp = Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
- tm.assert_series_equal(res, exp)
- assert res.dtype == "datetime64[ns, US/Eastern]"
-
- def test_tshift(self, datetime_series):
- # PeriodIndex
- ps = tm.makePeriodSeries()
- shifted = ps.tshift(1)
- unshifted = shifted.tshift(-1)
-
- tm.assert_series_equal(unshifted, ps)
-
- shifted2 = ps.tshift(freq="B")
- tm.assert_series_equal(shifted, shifted2)
-
- shifted3 = ps.tshift(freq=BDay())
- tm.assert_series_equal(shifted, shifted3)
-
- msg = "Given freq M does not match PeriodIndex freq B"
- with pytest.raises(ValueError, match=msg):
- ps.tshift(freq="M")
-
- # DatetimeIndex
- shifted = datetime_series.tshift(1)
- unshifted = shifted.tshift(-1)
-
- tm.assert_series_equal(datetime_series, unshifted)
-
- shifted2 = datetime_series.tshift(freq=datetime_series.index.freq)
- tm.assert_series_equal(shifted, shifted2)
-
- inferred_ts = Series(
- datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
- )
- shifted = inferred_ts.tshift(1)
- unshifted = shifted.tshift(-1)
- tm.assert_series_equal(shifted, datetime_series.tshift(1))
- tm.assert_series_equal(unshifted, inferred_ts)
-
- no_freq = datetime_series[[0, 5, 7]]
- msg = "Freq was not given and was not set in the index"
- with pytest.raises(ValueError, match=msg):
- no_freq.tshift()
-
- def test_truncate(self, datetime_series):
- offset = BDay()
-
- ts = datetime_series[::3]
-
- start, end = datetime_series.index[3], datetime_series.index[6]
- start_missing, end_missing = datetime_series.index[2], datetime_series.index[7]
-
- # neither specified
- truncated = ts.truncate()
- tm.assert_series_equal(truncated, ts)
-
- # both specified
- expected = ts[1:3]
-
- truncated = ts.truncate(start, end)
- tm.assert_series_equal(truncated, expected)
-
- truncated = ts.truncate(start_missing, end_missing)
- tm.assert_series_equal(truncated, expected)
-
- # start specified
- expected = ts[1:]
-
- truncated = ts.truncate(before=start)
- tm.assert_series_equal(truncated, expected)
-
- truncated = ts.truncate(before=start_missing)
- tm.assert_series_equal(truncated, expected)
-
- # end specified
- expected = ts[:3]
-
- truncated = ts.truncate(after=end)
- tm.assert_series_equal(truncated, expected)
-
- truncated = ts.truncate(after=end_missing)
- tm.assert_series_equal(truncated, expected)
-
- # corner case, empty series returned
- truncated = ts.truncate(after=datetime_series.index[0] - offset)
- assert len(truncated) == 0
-
- truncated = ts.truncate(before=datetime_series.index[-1] + offset)
- assert len(truncated) == 0
-
- msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00"
- with pytest.raises(ValueError, match=msg):
- ts.truncate(
- before=datetime_series.index[-1] + offset,
- after=datetime_series.index[0] - offset,
- )
-
- def test_truncate_nonsortedindex(self):
- # GH 17935
-
- s = pd.Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
- msg = "truncate requires a sorted index"
-
- with pytest.raises(ValueError, match=msg):
- s.truncate(before=3, after=9)
-
- rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
- ts = pd.Series(np.random.randn(len(rng)), index=rng)
- msg = "truncate requires a sorted index"
-
- with pytest.raises(ValueError, match=msg):
- ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12")
-
def test_asfreq(self):
ts = Series(
[0.0, 1.0, 2.0],
@@ -730,6 +455,7 @@ def test_at_time(self):
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
+ # FIXME: dont leave commented-out
# expected.index = date_range('1/1/2000', '1/4/2000')
tm.assert_series_equal(result, expected)
| xref #30385, #30381. | https://api.github.com/repos/pandas-dev/pandas/pulls/30414 | 2019-12-22T22:55:13Z | 2019-12-23T14:02:36Z | 2019-12-23T14:02:36Z | 2019-12-23T15:31:25Z |
REF: algos_take_helper de-nest templating | diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in
index 9dbae8170cbd0..420e08a3d68d4 100644
--- a/pandas/_libs/algos_take_helper.pxi.in
+++ b/pandas/_libs/algos_take_helper.pxi.in
@@ -10,69 +10,119 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
{{py:
-# c_type_in, c_type_out, preval, postval
+# c_type_in, c_type_out
dtypes = [
- ('uint8_t', 'uint8_t', '', ''),
- ('uint8_t', 'object', 'True if ', ' > 0 else False'),
- ('int8_t', 'int8_t', '', ''),
- ('int8_t', 'int32_t', '', ''),
- ('int8_t', 'int64_t', '', ''),
- ('int8_t', 'float64_t', '', ''),
- ('int16_t', 'int16_t', '', ''),
- ('int16_t', 'int32_t', '', ''),
- ('int16_t', 'int64_t', '', ''),
- ('int16_t', 'float64_t', '', ''),
- ('int32_t', 'int32_t', '', ''),
- ('int32_t', 'int64_t', '', ''),
- ('int32_t', 'float64_t', '', ''),
- ('int64_t', 'int64_t', '', ''),
- ('int64_t', 'float64_t', '', ''),
- ('float32_t', 'float32_t', '', ''),
- ('float32_t', 'float64_t', '', ''),
- ('float64_t', 'float64_t', '', ''),
- ('object', 'object', '', ''),
+ ('uint8_t', 'uint8_t'),
+ ('uint8_t', 'object'),
+ ('int8_t', 'int8_t'),
+ ('int8_t', 'int32_t'),
+ ('int8_t', 'int64_t'),
+ ('int8_t', 'float64_t'),
+ ('int16_t', 'int16_t'),
+ ('int16_t', 'int32_t'),
+ ('int16_t', 'int64_t'),
+ ('int16_t', 'float64_t'),
+ ('int32_t', 'int32_t'),
+ ('int32_t', 'int64_t'),
+ ('int32_t', 'float64_t'),
+ ('int64_t', 'int64_t'),
+ ('int64_t', 'float64_t'),
+ ('float32_t', 'float32_t'),
+ ('float32_t', 'float64_t'),
+ ('float64_t', 'float64_t'),
+ ('object', 'object'),
]
def get_dispatch(dtypes):
- inner_take_1d_template = """
+ for (c_type_in, c_type_out) in dtypes:
+
+ def get_name(dtype_name):
+ if dtype_name == "object":
+ return "object"
+ if dtype_name == "uint8_t":
+ return "bool"
+ return dtype_name[:-2]
+
+ name = get_name(c_type_in)
+ dest = get_name(c_type_out)
+
+ args = dict(name=name, dest=dest, c_type_in=c_type_in,
+ c_type_out=c_type_out)
+
+ yield (name, dest, c_type_in, c_type_out)
+
+}}
+
+
+{{for name, dest, c_type_in, c_type_out in get_dispatch(dtypes)}}
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+{{if c_type_in != "object"}}
+def take_1d_{{name}}_{{dest}}(const {{c_type_in}}[:] values,
+{{else}}
+def take_1d_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=1] values,
+{{endif}}
+ const int64_t[:] indexer,
+ {{c_type_out}}[:] out,
+ fill_value=np.nan):
+
cdef:
Py_ssize_t i, n, idx
- %(c_type_out)s fv
+ {{c_type_out}} fv
n = indexer.shape[0]
fv = fill_value
- %(nogil_str)s
- %(tab)sfor i in range(n):
- %(tab)s idx = indexer[i]
- %(tab)s if idx == -1:
- %(tab)s out[i] = fv
- %(tab)s else:
- %(tab)s out[i] = %(preval)svalues[idx]%(postval)s
-"""
+ {{if c_type_out != "object"}}
+ with nogil:
+ {{else}}
+ if True:
+ {{endif}}
+ for i in range(n):
+ idx = indexer[i]
+ if idx == -1:
+ out[i] = fv
+ else:
+ {{if c_type_in == "uint8_t" and c_type_out == "object"}}
+ out[i] = True if values[idx] > 0 else False
+ {{else}}
+ out[i] = values[idx]
+ {{endif}}
+
- inner_take_2d_axis0_template = """\
+@cython.wraparound(False)
+@cython.boundscheck(False)
+{{if c_type_in != "object"}}
+def take_2d_axis0_{{name}}_{{dest}}(const {{c_type_in}}[:, :] values,
+{{else}}
+def take_2d_axis0_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
+{{endif}}
+ ndarray[int64_t] indexer,
+ {{c_type_out}}[:, :] out,
+ fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
- %(c_type_out)s fv
+ {{c_type_out}} fv
n = len(indexer)
k = values.shape[1]
fv = fill_value
- IF %(can_copy)s:
+ IF {{True if c_type_in == c_type_out != "object" else False}}:
cdef:
- %(c_type_out)s *v
- %(c_type_out)s *o
+ {{c_type_out}} *v
+ {{c_type_out}} *o
- #GH3130
+ # GH#3130
if (values.strides[1] == out.strides[1] and
- values.strides[1] == sizeof(%(c_type_out)s) and
- sizeof(%(c_type_out)s) * n >= 256):
+ values.strides[1] == sizeof({{c_type_out}}) and
+ sizeof({{c_type_out}}) * n >= 256):
for i in range(n):
idx = indexer[i]
@@ -82,7 +132,7 @@ def get_dispatch(dtypes):
else:
v = &values[idx, 0]
o = &out[i, 0]
- memmove(o, v, <size_t>(sizeof(%(c_type_out)s) * k))
+ memmove(o, v, <size_t>(sizeof({{c_type_out}}) * k))
return
for i in range(n):
@@ -92,13 +142,27 @@ def get_dispatch(dtypes):
out[i, j] = fv
else:
for j in range(k):
- out[i, j] = %(preval)svalues[idx, j]%(postval)s
-"""
+ {{if c_type_in == "uint8_t" and c_type_out == "object"}}
+ out[i, j] = True if values[idx, j] > 0 else False
+ {{else}}
+ out[i, j] = values[idx, j]
+ {{endif}}
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+{{if c_type_in != "object"}}
+def take_2d_axis1_{{name}}_{{dest}}(const {{c_type_in}}[:, :] values,
+{{else}}
+def take_2d_axis1_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
+{{endif}}
+ ndarray[int64_t] indexer,
+ {{c_type_out}}[:, :] out,
+ fill_value=np.nan):
- inner_take_2d_axis1_template = """\
cdef:
Py_ssize_t i, j, k, n, idx
- %(c_type_out)s fv
+ {{c_type_out}} fv
n = len(values)
k = len(indexer)
@@ -114,132 +178,11 @@ def get_dispatch(dtypes):
if idx == -1:
out[i, j] = fv
else:
- out[i, j] = %(preval)svalues[i, idx]%(postval)s
-"""
-
- for (c_type_in, c_type_out, preval, postval) in dtypes:
-
- can_copy = c_type_in == c_type_out != "object"
- nogil = c_type_out != "object"
- if nogil:
- nogil_str = "with nogil:"
- tab = ' '
- else:
- nogil_str = ''
- tab = ''
-
- def get_name(dtype_name):
- if dtype_name == "object":
- return "object"
- if dtype_name == "uint8_t":
- return "bool"
- return dtype_name[:-2]
-
- name = get_name(c_type_in)
- dest = get_name(c_type_out)
-
- args = dict(name=name, dest=dest, c_type_in=c_type_in,
- c_type_out=c_type_out, preval=preval, postval=postval,
- can_copy=can_copy, nogil_str=nogil_str, tab=tab)
-
- inner_take_1d = inner_take_1d_template % args
- inner_take_2d_axis0 = inner_take_2d_axis0_template % args
- inner_take_2d_axis1 = inner_take_2d_axis1_template % args
-
- yield (name, dest, c_type_in, c_type_out, preval, postval,
- inner_take_1d, inner_take_2d_axis0, inner_take_2d_axis1)
-
-}}
-
-
-{{for name, dest, c_type_in, c_type_out, preval, postval,
- inner_take_1d, inner_take_2d_axis0, inner_take_2d_axis1
- in get_dispatch(dtypes)}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_1d_{{name}}_{{dest}}_memview({{c_type_in}}[:] values,
- const int64_t[:] indexer,
- {{c_type_out}}[:] out,
- fill_value=np.nan):
-
-
-{{inner_take_1d}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_1d_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=1] values,
- const int64_t[:] indexer,
- {{c_type_out}}[:] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_1d_{{name}}_{{dest}}_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-{{inner_take_1d}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis0_{{name}}_{{dest}}_memview({{c_type_in}}[:, :] values,
- const int64_t[:] indexer,
- {{c_type_out}}[:, :] out,
- fill_value=np.nan):
-{{inner_take_2d_axis0}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis0_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
- ndarray[int64_t] indexer,
- {{c_type_out}}[:, :] out,
- fill_value=np.nan):
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis0_{{name}}_{{dest}}_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-{{inner_take_2d_axis0}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef inline take_2d_axis1_{{name}}_{{dest}}_memview({{c_type_in}}[:, :] values,
- const int64_t[:] indexer,
- {{c_type_out}}[:, :] out,
- fill_value=np.nan):
-{{inner_take_2d_axis1}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def take_2d_axis1_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
- ndarray[int64_t] indexer,
- {{c_type_out}}[:, :] out,
- fill_value=np.nan):
-
- if values.flags.writeable:
- # We can call the memoryview version of the code
- take_2d_axis1_{{name}}_{{dest}}_memview(values, indexer, out,
- fill_value=fill_value)
- return
-
- # We cannot use the memoryview version on readonly-buffers due to
- # a limitation of Cython's typed memoryviews. Instead we can use
- # the slightly slower Cython ndarray type directly.
-{{inner_take_2d_axis1}}
+ {{if c_type_in == "uint8_t" and c_type_out == "object"}}
+ out[i, j] = True if values[i, idx] > 0 else False
+ {{else}}
+ out[i, j] = values[i, idx]
+ {{endif}}
@cython.wraparound(False)
@@ -268,7 +211,11 @@ def take_2d_multi_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
if idx1[j] == -1:
out[i, j] = fv
else:
- out[i, j] = {{preval}}values[idx, idx1[j]]{{postval}}
+ {{if c_type_in == "uint8_t" and c_type_out == "object"}}
+ out[i, j] = True if values[idx, idx1[j]] > 0 else False
+ {{else}}
+ out[i, j] = values[idx, idx1[j]]
+ {{endif}}
{{endfor}}
| Having two layers of templating here is really weird, this unravels one layer so this file is organized like our other tempita files. That makes future moves towards fused types and/or conditional nogil feasible. | https://api.github.com/repos/pandas-dev/pandas/pulls/30413 | 2019-12-22T22:05:35Z | 2019-12-24T13:44:47Z | 2019-12-24T13:44:47Z | 2019-12-24T16:46:20Z |
Fixing format string in pandas/tests/extension/list/array.py | diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py
index 0ca9fadb68829..6dd00ad3b06ba 100644
--- a/pandas/tests/extension/list/array.py
+++ b/pandas/tests/extension/list/array.py
@@ -36,7 +36,7 @@ def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
- raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string))
+ raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
class ListArray(ExtensionArray):
| This is a fix on pandas/tests/extension/list/array.py to utilize python3 format strings
([#29886](https://github.com/pandas-dev/pandas/issues/29886)). | https://api.github.com/repos/pandas-dev/pandas/pulls/30412 | 2019-12-22T21:50:41Z | 2019-12-23T01:14:18Z | 2019-12-23T01:14:18Z | 2019-12-23T01:14:27Z |
TYP: Typing annotations pandas/util/ | diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 2e91d76b0fe98..d10d3a1f71fe6 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -294,7 +294,6 @@ def update(self, *args, **kwargs) -> None:
"""
Update self.params with supplied args.
"""
-
if isinstance(self.params, dict):
self.params.update(*args, **kwargs)
diff --git a/pandas/util/_depr_module.py b/pandas/util/_depr_module.py
index 99dafdd760d26..5733663dd7ab3 100644
--- a/pandas/util/_depr_module.py
+++ b/pandas/util/_depr_module.py
@@ -4,11 +4,13 @@
"""
import importlib
+from typing import Iterable
import warnings
class _DeprecatedModule:
- """ Class for mocking deprecated modules.
+ """
+ Class for mocking deprecated modules.
Parameters
----------
@@ -34,7 +36,7 @@ def __init__(self, deprmod, deprmodto=None, removals=None, moved=None):
# For introspection purposes.
self.self_dir = frozenset(dir(type(self)))
- def __dir__(self):
+ def __dir__(self) -> Iterable[str]:
deprmodule = self._import_deprmod()
return dir(deprmodule)
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index 91972fed7a3bb..8fd4566d7763b 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -1,3 +1,5 @@
+from typing import Optional, Tuple
+
import numpy as np
import pandas as pd
@@ -9,24 +11,27 @@ class TablePlotter:
Used in merging.rst
"""
- def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
+ def __init__(
+ self,
+ cell_width: float = 0.37,
+ cell_height: float = 0.25,
+ font_size: float = 7.5,
+ ):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
- def _shape(self, df):
+ def _shape(self, df: pd.DataFrame) -> Tuple[int, int]:
"""
Calculate table chape considering index levels.
"""
-
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
- def _get_cells(self, left, right, vertical):
+ def _get_cells(self, left, right, vertical) -> Tuple[int, int]:
"""
Calculate appropriate figure size based on left and right data.
"""
-
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left), self._shape(right)[0])
@@ -36,7 +41,7 @@ def _get_cells(self, left, right, vertical):
hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]])
return hcells, vcells
- def plot(self, left, right, labels=None, vertical=True):
+ def plot(self, left, right, labels=None, vertical: bool = True):
"""
Plot left / right DataFrames in specified layout.
@@ -45,7 +50,7 @@ def plot(self, left, right, labels=None, vertical=True):
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
- vertical : bool
+ vertical : bool, default True
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
@@ -96,7 +101,9 @@ def plot(self, left, right, labels=None, vertical=True):
return fig
def _conv(self, data):
- """Convert each input to appropriate for table outplot"""
+ """
+ Convert each input to appropriate for table outplot.
+ """
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name="")
@@ -127,7 +134,7 @@ def _insert_index(self, data):
data.columns = col
return data
- def _make_table(self, ax, df, title, height=None):
+ def _make_table(self, ax, df, title: str, height: Optional[float] = None):
if df is None:
ax.set_visible(False)
return
diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index b8719154eb791..0723a37b1ba82 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -4,7 +4,9 @@
@contextlib.contextmanager
def rewrite_exception(old_name: str, new_name: str):
- """Rewrite the message of an exception."""
+ """
+ Rewrite the message of an exception.
+ """
try:
yield
except Exception as err:
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index b9f7e0c69f8b6..2801a2bf9c371 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -12,8 +12,9 @@
def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]:
- "Returns system information as a list"
-
+ """
+ Returns system information as a list
+ """
blob: List[Tuple[str, Optional[Union[str, int]]]] = []
# get full commit hash
@@ -123,7 +124,7 @@ def show_versions(as_json=False):
print(tpl.format(k=k, stat=stat))
-def main():
+def main() -> int:
from optparse import OptionParser
parser = OptionParser()
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 0e3ea25bf6fdb..7e14ed27d5bd4 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -37,7 +37,7 @@ def test_foo():
from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
-def safe_import(mod_name, min_version=None):
+def safe_import(mod_name: str, min_version: Optional[str] = None):
"""
Parameters:
-----------
@@ -110,7 +110,7 @@ def _skip_if_not_us_locale():
return True
-def _skip_if_no_scipy():
+def _skip_if_no_scipy() -> bool:
return not (
safe_import("scipy.stats")
and safe_import("scipy.sparse")
@@ -195,7 +195,9 @@ def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable:
)
-def skip_if_np_lt(ver_str, reason=None, *args, **kwds):
+def skip_if_np_lt(
+ ver_str: str, reason: Optional[str] = None, *args, **kwds
+) -> Callable:
if reason is None:
reason = f"NumPy {ver_str} or greater required"
return pytest.mark.skipif(
@@ -211,14 +213,14 @@ def parametrize_fixture_doc(*args):
initial fixture docstring by replacing placeholders {0}, {1} etc
with parameters passed as arguments.
- Parameters:
+ Parameters
----------
- args: iterable
- Positional arguments for docstring.
+ args: iterable
+ Positional arguments for docstring.
- Returns:
+ Returns
-------
- documented_fixture: function
+ function
The decorated function wrapped within a pytest
``parametrize_fixture_doc`` mark
"""
@@ -230,7 +232,7 @@ def documented_fixture(fixture):
return documented_fixture
-def check_file_leaks(func):
+def check_file_leaks(func) -> Callable:
"""
Decorate a test function tot check that we are not leaking file descriptors.
"""
diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py
index 6a3943cab692e..b299f3790ab22 100644
--- a/pandas/util/_tester.py
+++ b/pandas/util/_tester.py
@@ -1,5 +1,5 @@
"""
-Entrypoint for testing from the top-level namespace
+Entrypoint for testing from the top-level namespace.
"""
import os
import sys
@@ -22,7 +22,8 @@ def test(extra_args=None):
extra_args = [extra_args]
cmd = extra_args
cmd += [PKG]
- print(f"running: pytest {' '.join(cmd)}")
+ joined = " ".join(cmd)
+ print(f"running: pytest {joined}")
sys.exit(pytest.main(cmd))
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 8b675a6b688fe..547fe748ae941 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -15,7 +15,6 @@ def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
-
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
@@ -38,7 +37,6 @@ def _check_for_default_values(fname, arg_val_dict, compat_args):
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
-
"""
for key in arg_val_dict:
# try checking equality directly with '=' operator,
@@ -65,11 +63,8 @@ def _check_for_default_values(fname, arg_val_dict, compat_args):
if not match:
raise ValueError(
- (
- f"the '{key}' parameter is not "
- "supported in the pandas "
- f"implementation of {fname}()"
- )
+ f"the '{key}' parameter is not supported in "
+ f"the pandas implementation of {fname}()"
)
@@ -79,19 +74,18 @@ def validate_args(fname, args, max_fname_arg_count, compat_args):
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
- fname: str
+ Parameters
+ ----------
+ fname : str
The name of the function being passed the `*args` parameter
-
- args: tuple
+ args : tuple
The `*args` parameter passed into a function
-
- max_fname_arg_count: int
+ max_fname_arg_count : int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
-
- compat_args: OrderedDict
- A ordered dictionary of keys and their associated default values.
+ compat_args : Dict
+ An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
@@ -101,10 +95,11 @@ def validate_args(fname, args, max_fname_arg_count, compat_args):
Raises
------
- TypeError if `args` contains more values than there are `compat_args`
- ValueError if `args` contains values that do not correspond to those
- of the default values specified in `compat_args`
-
+ TypeError
+ If `args` contains more values than there are `compat_args`
+ ValueError
+ If `args` contains values that do not correspond to those
+ of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
@@ -119,7 +114,6 @@ def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
-
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
@@ -139,12 +133,10 @@ def validate_kwargs(fname, kwargs, compat_args):
Parameters
----------
- fname: str
+ fname : str
The name of the function being passed the `**kwargs` parameter
-
- kwargs: dict
+ kwargs : dict
The `**kwargs` parameter passed into `fname`
-
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
@@ -154,7 +146,6 @@ def validate_kwargs(fname, kwargs, compat_args):
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
-
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
@@ -171,18 +162,14 @@ def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_ar
----------
fname: str
The name of the function being passed the `**kwargs` parameter
-
args: tuple
The `*args` parameter passed into a function
-
kwargs: dict
The `**kwargs` parameter passed into `fname`
-
max_fname_arg_count: int
The minimum number of arguments that the function `fname`
requires, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
-
compat_args: OrderedDict
A ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 6350b1075f4a0..c31cddc102afb 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -8,7 +8,7 @@
from shutil import rmtree
import string
import tempfile
-from typing import Union, cast
+from typing import List, Optional, Union, cast
import warnings
import zipfile
@@ -22,6 +22,7 @@
)
import pandas._libs.testing as _testing
+from pandas._typing import FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
@@ -97,11 +98,10 @@ def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
-
pd.reset_option("^display.", silent=True)
-def round_trip_pickle(obj, path=None):
+def round_trip_pickle(obj: FrameOrSeries, path: Optional[str] = None) -> FrameOrSeries:
"""
Pickle an object and then read it again.
@@ -114,10 +114,9 @@ def round_trip_pickle(obj, path=None):
Returns
-------
- round_trip_pickled_object : pandas object
+ pandas object
The original object that was pickled and then re-read.
"""
-
if path is None:
path = f"__{rands(10)}__.pickle"
with ensure_clean(path) as path:
@@ -125,7 +124,7 @@ def round_trip_pickle(obj, path=None):
return pd.read_pickle(path)
-def round_trip_pathlib(writer, reader, path=None):
+def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
@@ -140,10 +139,9 @@ def round_trip_pathlib(writer, reader, path=None):
Returns
-------
- round_trip_object : pandas object
+ pandas object
The original object that was serialized and then re-read.
"""
-
import pytest
Path = pytest.importorskip("pathlib").Path
@@ -155,9 +153,9 @@ def round_trip_pathlib(writer, reader, path=None):
return obj
-def round_trip_localpath(writer, reader, path=None):
+def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
- Write an object to file specified by a py.path LocalPath and read it back
+ Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
@@ -170,7 +168,7 @@ def round_trip_localpath(writer, reader, path=None):
Returns
-------
- round_trip_object : pandas object
+ pandas object
The original object that was serialized and then re-read.
"""
import pytest
@@ -187,21 +185,20 @@ def round_trip_localpath(writer, reader, path=None):
@contextmanager
def decompress_file(path, compression):
"""
- Open a compressed file and return a file object
+ Open a compressed file and return a file object.
Parameters
----------
path : str
- The path where the file is read from
+ The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
- f : file object
+ file object
"""
-
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
@@ -247,7 +244,6 @@ def write_to_compressed(compression, path, data, dest="test"):
------
ValueError : An invalid compression value was passed in.
"""
-
if compression == "zip":
import zipfile
@@ -279,7 +275,11 @@ def write_to_compressed(compression, path, data, dest="test"):
def assert_almost_equal(
- left, right, check_dtype="equiv", check_less_precise=False, **kwargs
+ left,
+ right,
+ check_dtype: Union[bool, str] = "equiv",
+ check_less_precise: Union[bool, int] = False,
+ **kwargs,
):
"""
Check that the left and right objects are approximately equal.
@@ -306,7 +306,6 @@ def assert_almost_equal(
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
-
if isinstance(left, pd.Index):
assert_index_equal(
left,
@@ -389,13 +388,13 @@ def _check_isinstance(left, right, cls):
)
-def assert_dict_equal(left, right, compare_keys=True):
+def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
-def randbool(size=(), p=0.5):
+def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
@@ -407,7 +406,9 @@ def randbool(size=(), p=0.5):
def rands_array(nchars, size, dtype="O"):
- """Generate an array of byte strings."""
+ """
+ Generate an array of byte strings.
+ """
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
@@ -420,7 +421,9 @@ def rands_array(nchars, size, dtype="O"):
def randu_array(nchars, size, dtype="O"):
- """Generate an array of unicode strings."""
+ """
+ Generate an array of unicode strings.
+ """
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
@@ -468,7 +471,8 @@ def close(fignum=None):
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
- """Gets a temporary path and agrees to remove on close.
+ """
+ Gets a temporary path and agrees to remove on close.
Parameters
----------
@@ -553,8 +557,9 @@ def ensure_safe_environment_variables():
# Comparators
-def equalContents(arr1, arr2):
- """Checks if the set of unique elements of arr1 and arr2 are equivalent.
+def equalContents(arr1, arr2) -> bool:
+ """
+ Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
@@ -691,8 +696,10 @@ def _get_ilevel_values(index, level):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
-def assert_class_equal(left, right, exact=True, obj="Input"):
- """checks classes are equal."""
+def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
+ """
+ Checks classes are equal.
+ """
__tracebackhide__ = True
def repr_class(x):
@@ -2641,8 +2648,9 @@ def _constructor(self):
@contextmanager
-def set_timezone(tz):
- """Context manager for temporarily setting a timezone.
+def set_timezone(tz: str):
+ """
+ Context manager for temporarily setting a timezone.
Parameters
----------
@@ -2685,7 +2693,8 @@ def setTZ(tz):
def _make_skipna_wrapper(alternative, skipna_alternative=None):
- """Create a function for calling on an array.
+ """
+ Create a function for calling on an array.
Parameters
----------
@@ -2697,7 +2706,7 @@ def _make_skipna_wrapper(alternative, skipna_alternative=None):
Returns
-------
- skipna_wrapper : function
+ function
"""
if skipna_alternative:
@@ -2715,7 +2724,7 @@ def skipna_wrapper(x):
return skipna_wrapper
-def convert_rows_list_to_csv_str(rows_list):
+def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
@@ -2723,13 +2732,13 @@ def convert_rows_list_to_csv_str(rows_list):
Parameters
----------
- rows_list : list
- The list of string. Each element represents the row of csv.
+ rows_list : List[str]
+ Each element represents the row of csv.
Returns
-------
- expected : string
- Expected output of to_csv() in current OS
+ str
+ Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30411 | 2019-12-22T21:44:46Z | 2019-12-24T16:40:04Z | 2019-12-24T16:40:04Z | 2019-12-24T16:42:21Z |
CLN: str.format -> f-strings for `io/sas` | diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index eb57d703cd4d5..c6a28c1fa813d 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -169,7 +169,7 @@ def _get_properties(self):
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
- self.file_encoding = "unknown (code={name!s})".format(name=buf)
+ self.file_encoding = f"unknown (code={buf})"
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
@@ -293,8 +293,8 @@ def _read_bytes(self, offset, length):
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
- msg = "Unable to read {:d} bytes from file position {:d}."
- raise ValueError(msg.format(length, offset))
+ msg = f"Unable to read {length:d} bytes from file position {offset:d}."
+ raise ValueError(msg)
return buf
else:
if offset + length > len(self._cached_page):
@@ -457,12 +457,9 @@ def _process_columnsize_subheader(self, offset, length):
self.column_count = self._read_int(offset, int_len)
if self.col_count_p1 + self.col_count_p2 != self.column_count:
print(
- "Warning: column count mismatch ({p1} + {p2} != "
- "{column_count})\n".format(
- p1=self.col_count_p1,
- p2=self.col_count_p2,
- column_count=self.column_count,
- )
+ f"Warning: column count mismatch ({self.col_count_p1} + "
+ f"{self.col_count_p2} != "
+ f"{self.column_count})\n"
)
# Unknown purpose
@@ -672,8 +669,12 @@ def _read_next_page(self):
return True
elif len(self._cached_page) != self._page_length:
self.close()
- msg = "failed to read complete page from file (read {:d} of {:d} bytes)"
- raise ValueError(msg.format(len(self._cached_page), self._page_length))
+ msg = (
+ "failed to read complete page from file (read "
+ f"{len(self._cached_page):d} of "
+ f"{self._page_length:d} bytes)"
+ )
+ raise ValueError(msg)
self._read_page_header()
page_type = self._current_page_type
@@ -725,8 +726,6 @@ def _chunk_to_dataframe(self):
js += 1
else:
self.close()
- raise ValueError(
- "unknown column type {type}".format(type=self._column_types[j])
- )
+ raise ValueError(f"unknown column type {self._column_types[j]}")
return rslt
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 9aa8ed1dfeb5d..e4a06c794271d 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -367,8 +367,8 @@ def _read_header(self):
fl = field["field_length"]
if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
self.close()
- msg = "Floating field width {0} is not between 2 and 8."
- raise TypeError(msg.format(fl))
+ msg = f"Floating field width {fl} is not between 2 and 8."
+ raise TypeError(msg)
for k, v in field.items():
try:
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 49af18d2935ef..d3480b246b91f 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -21,7 +21,7 @@ def setup_method(self, datapath):
self.data = []
self.test_ix = [list(range(1, 16)), [16]]
for j in 1, 2:
- fname = os.path.join(self.dirpath, "test_sas7bdat_{j}.csv".format(j=j))
+ fname = os.path.join(self.dirpath, f"test_sas7bdat_{j}.csv")
df = pd.read_csv(fname)
epoch = pd.datetime(1960, 1, 1)
t1 = pd.to_timedelta(df["Column4"], unit="d")
@@ -38,7 +38,7 @@ def test_from_file(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
- fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
+ fname = os.path.join(self.dirpath, f"test{k}.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, df0)
@@ -46,7 +46,7 @@ def test_from_buffer(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
- fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
+ fname = os.path.join(self.dirpath, f"test{k}.sas7bdat")
with open(fname, "rb") as f:
byts = f.read()
buf = io.BytesIO(byts)
@@ -61,7 +61,7 @@ def test_from_iterator(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
- fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
+ fname = os.path.join(self.dirpath, f"test{k}.sas7bdat")
rdr = pd.read_sas(fname, iterator=True, encoding="utf-8")
df = rdr.read(2)
tm.assert_frame_equal(df, df0.iloc[0:2, :])
@@ -73,7 +73,7 @@ def test_path_pathlib(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
- fname = Path(os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k)))
+ fname = Path(os.path.join(self.dirpath, f"test{k}.sas7bdat"))
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, df0)
@@ -84,9 +84,7 @@ def test_path_localpath(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
- fname = LocalPath(
- os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
- )
+ fname = LocalPath(os.path.join(self.dirpath, f"test{k}.sas7bdat"))
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, df0)
@@ -95,7 +93,7 @@ def test_iterator_loop(self):
for j in 0, 1:
for k in self.test_ix[j]:
for chunksize in 3, 5, 10, 11:
- fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
+ fname = os.path.join(self.dirpath, f"test{k}.sas7bdat")
rdr = pd.read_sas(fname, chunksize=10, encoding="utf-8")
y = 0
for x in rdr:
@@ -106,7 +104,7 @@ def test_iterator_loop(self):
def test_iterator_read_too_much(self):
# github #14734
k = self.test_ix[0][0]
- fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
+ fname = os.path.join(self.dirpath, f"test{k}.sas7bdat")
rdr = pd.read_sas(fname, format="sas7bdat", iterator=True, encoding="utf-8")
d1 = rdr.read(rdr.row_count + 20)
rdr.close()
| - [x] ref #29547
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30409 | 2019-12-22T19:32:02Z | 2019-12-24T19:56:37Z | 2019-12-24T19:56:37Z | 2020-06-23T17:09:05Z |
ENH: Add ignore_index for df.drop_duplicates | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 9023cf2ab1b4f..e77beb2943101 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -208,6 +208,7 @@ Other enhancements
- :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. (:issue: `30270`)
- DataFrame constructor preserve `ExtensionArray` dtype with `ExtensionArray` (:issue:`11363`)
+- :meth:`DataFrame.drop_duplicates` has gained ``ignore_index`` keyword to reset index (:issue:`30114`)
Build Changes
^^^^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index dfda1470413b7..e8b4b292163e6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4587,6 +4587,7 @@ def drop_duplicates(
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
+ ignore_index: bool = False,
) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed.
@@ -4606,6 +4607,10 @@ def drop_duplicates(
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
+ ignore_index : bool, default False
+ If True, the resulting axis will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 1.0.0
Returns
-------
@@ -4621,9 +4626,16 @@ def drop_duplicates(
if inplace:
(inds,) = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
+
+ if ignore_index:
+ new_data.axes[1] = ibase.default_index(len(inds))
self._update_inplace(new_data)
else:
- return self[-duplicated]
+ result = self[-duplicated]
+
+ if ignore_index:
+ result.index = ibase.default_index(len(result))
+ return result
return None
diff --git a/pandas/tests/frame/methods/test_drop_duplicates.py b/pandas/tests/frame/methods/test_drop_duplicates.py
index a7715d1f31673..29ab2e1bfd512 100644
--- a/pandas/tests/frame/methods/test_drop_duplicates.py
+++ b/pandas/tests/frame/methods/test_drop_duplicates.py
@@ -391,3 +391,36 @@ def test_drop_duplicates_inplace():
expected = orig2.drop_duplicates(["A", "B"], keep=False)
result = df2
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "origin_dict, output_dict, ignore_index, output_index",
+ [
+ ({"A": [2, 2, 3]}, {"A": [2, 3]}, True, [0, 1]),
+ ({"A": [2, 2, 3]}, {"A": [2, 3]}, False, [0, 2]),
+ ({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, True, [0, 1]),
+ ({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, False, [0, 2]),
+ ],
+)
+def test_drop_duplicates_ignore_index(
+ origin_dict, output_dict, ignore_index, output_index
+):
+ # GH 30114
+ df = DataFrame(origin_dict)
+ expected = DataFrame(output_dict, index=output_index)
+
+ # Test when inplace is False
+ result = df.drop_duplicates(ignore_index=ignore_index)
+ tm.assert_frame_equal(result, expected)
+
+ # to verify original dataframe is not mutated
+ tm.assert_frame_equal(df, DataFrame(origin_dict))
+
+ # Test when inplace is True
+ copied_df = df.copy()
+
+ copied_df.drop_duplicates(ignore_index=ignore_index, inplace=True)
+ tm.assert_frame_equal(copied_df, expected)
+
+ # to verify that input is unchanged
+ tm.assert_frame_equal(df, DataFrame(origin_dict))
| - [x] xref #30114
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30405 | 2019-12-22T15:51:01Z | 2019-12-27T16:33:11Z | 2019-12-27T16:33:11Z | 2019-12-27T16:33:16Z |
TYP: Type annotations in pandas/io/formats/printing.py | diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index 3a3347a5c86ea..b88478b3da181 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -109,12 +109,12 @@ def write_th(
----------
s : object
The data to be written inside the cell.
- header : boolean, default False
+ header : bool, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
- tags : string, default None
+ tags : str, default None
Tags to include in the cell.
Returns
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 2176487ff6a36..4b5b5e9a0ce15 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -1,5 +1,5 @@
"""
-printing tools
+Printing tools.
"""
import sys
@@ -182,13 +182,12 @@ def pprint_thing(
replacements
default_escapes : bool, default False
Whether the input escape characters replaces or adds to the defaults
- max_seq_items : False, int, default None
- Pass thru to other pretty printers to limit sequence printing
+ max_seq_items : int or None, default None
+ Pass through to other pretty printers to limit sequence printing
Returns
-------
str
-
"""
def as_escaped_string(
@@ -312,7 +311,6 @@ def format_object_summary(
Returns
-------
summary string
-
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import _get_adjustment
@@ -346,7 +344,9 @@ def format_object_summary(
# adj can optionally handle unicode eastern asian width
adj = _get_adjustment()
- def _extend_line(s, line, value, display_width, next_line_prefix):
+ def _extend_line(
+ s: str, line: str, value: str, display_width: int, next_line_prefix: str
+ ) -> Tuple[str, str]:
if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:
s += line.rstrip()
@@ -354,7 +354,7 @@ def _extend_line(s, line, value, display_width, next_line_prefix):
line += value
return s, line
- def best_len(values):
+ def best_len(values: List[str]) -> int:
if values:
return max(adj.len(x) for x in values)
else:
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30404 | 2019-12-22T13:35:34Z | 2019-12-23T15:28:51Z | 2019-12-23T15:28:51Z | 2019-12-24T15:21:04Z |
TYP: Type annotations in pandas/io/formats/style.py | diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 9b0f100c1b041..b0e8e4033edf2 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -1,14 +1,17 @@
-"""Utilities for conversion to writer-agnostic Excel representation
+"""
+Utilities for conversion to writer-agnostic Excel representation.
"""
from functools import reduce
import itertools
import re
-from typing import Callable, Dict, List, Optional, Sequence, Union
+from typing import Callable, Dict, Optional, Sequence, Union
import warnings
import numpy as np
+from pandas._typing import Label
+
from pandas.core.dtypes import missing
from pandas.core.dtypes.common import is_float, is_scalar
from pandas.core.dtypes.generic import ABCMultiIndex, ABCPeriodIndex
@@ -371,10 +374,10 @@ def __init__(
df,
na_rep: str = "",
float_format: Optional[str] = None,
- cols: Optional[Sequence] = None,
- header: Union[bool, List[str]] = True,
+ cols: Optional[Sequence[Label]] = None,
+ header: Union[Sequence[Label], bool] = True,
index: bool = True,
- index_label: Union[str, Sequence, None] = None,
+ index_label: Optional[Union[Label, Sequence[Label]]] = None,
merge_cells: bool = False,
inf_rep: str = "inf",
style_converter: Optional[Callable] = None,
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 8570875569e44..4f2430b6c8568 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1,6 +1,5 @@
"""
-Module for applying conditional formatting to
-DataFrames and Series.
+Module for applying conditional formatting to DataFrames and Series.
"""
from collections import defaultdict
@@ -8,7 +7,17 @@
import copy
from functools import partial
from itertools import product
-from typing import Any, Callable, DefaultDict, Dict, List, Optional, Sequence, Tuple
+from typing import (
+ Any,
+ Callable,
+ DefaultDict,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
from uuid import uuid1
import numpy as np
@@ -16,6 +25,7 @@
from pandas._config import get_option
from pandas._libs import lib
+from pandas._typing import Axis, FrameOrSeries, FrameOrSeriesUnion, Label
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import Appender
@@ -24,6 +34,7 @@
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
+from pandas.core.frame import DataFrame
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
@@ -41,7 +52,7 @@
@contextmanager
-def _mpl(func):
+def _mpl(func: Callable):
if has_mpl:
yield plt, colors
else:
@@ -125,13 +136,13 @@ class Styler:
def __init__(
self,
- data,
- precision=None,
- table_styles=None,
- uuid=None,
- caption=None,
- table_attributes=None,
- cell_ids=True,
+ data: FrameOrSeriesUnion,
+ precision: Optional[int] = None,
+ table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,
+ uuid: Optional[str] = None,
+ caption: Optional[str] = None,
+ table_attributes: Optional[str] = None,
+ cell_ids: bool = True,
na_rep: Optional[str] = None,
):
self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)
@@ -175,7 +186,7 @@ def default_display_func(x):
Tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: default_display_func)
- def _repr_html_(self):
+ def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
@@ -196,22 +207,22 @@ def _repr_html_(self):
def to_excel(
self,
excel_writer,
- sheet_name="Sheet1",
- na_rep="",
- float_format=None,
- columns=None,
- header=True,
- index=True,
- index_label=None,
- startrow=0,
- startcol=0,
- engine=None,
- merge_cells=True,
- encoding=None,
- inf_rep="inf",
- verbose=True,
- freeze_panes=None,
- ):
+ sheet_name: str = "Sheet1",
+ na_rep: str = "",
+ float_format: Optional[str] = None,
+ columns: Optional[Sequence[Label]] = None,
+ header: Union[Sequence[Label], bool] = True,
+ index: bool = True,
+ index_label: Optional[Union[Label, Sequence[Label]]] = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ engine: Optional[str] = None,
+ merge_cells: bool = True,
+ encoding: Optional[str] = None,
+ inf_rep: str = "inf",
+ verbose: bool = True,
+ freeze_panes: Optional[Tuple[int, int]] = None,
+ ) -> None:
from pandas.io.formats.excel import ExcelFormatter
@@ -423,7 +434,7 @@ def format_attr(pair):
table_attributes=table_attr,
)
- def format(self, formatter, subset=None, na_rep: Optional[str] = None):
+ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Styler":
"""
Format the text display value of cells.
@@ -496,7 +507,7 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None):
self._display_funcs[(i, j)] = formatter
return self
- def render(self, **kwargs):
+ def render(self, **kwargs) -> str:
"""
Render the built up styles to HTML.
@@ -545,16 +556,18 @@ def render(self, **kwargs):
d.update(kwargs)
return self.template.render(**d)
- def _update_ctx(self, attrs):
+ def _update_ctx(self, attrs: DataFrame) -> None:
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
- attrs : Series or DataFrame
- should contain strings of '<property>: <value>;<prop2>: <val2>'
- Whitespace shouldn't matter and the final trailing ';' shouldn't
- matter.
+ Parameters
+ ----------
+ attrs : DataFrame
+ should contain strings of '<property>: <value>;<prop2>: <val2>'
+ Whitespace shouldn't matter and the final trailing ';' shouldn't
+ matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.items():
@@ -563,7 +576,7 @@ def _update_ctx(self, attrs):
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
- def _copy(self, deepcopy=False):
+ def _copy(self, deepcopy: bool = False) -> "Styler":
styler = Styler(
self.data,
precision=self.precision,
@@ -580,16 +593,16 @@ def _copy(self, deepcopy=False):
styler._todo = self._todo
return styler
- def __copy__(self):
+ def __copy__(self) -> "Styler":
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
- def __deepcopy__(self, memo):
+ def __deepcopy__(self, memo) -> "Styler":
return self._copy(deepcopy=True)
- def clear(self):
+ def clear(self) -> None:
"""
Reset the styler, removing any previously applied styles.
@@ -612,7 +625,13 @@ def _compute(self):
r = func(self)(*args, **kwargs)
return r
- def _apply(self, func, axis=0, subset=None, **kwargs):
+ def _apply(
+ self,
+ func: Callable[..., "Styler"],
+ axis: Optional[Axis] = 0,
+ subset=None,
+ **kwargs,
+ ) -> "Styler":
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
@@ -645,7 +664,13 @@ def _apply(self, func, axis=0, subset=None, **kwargs):
self._update_ctx(result)
return self
- def apply(self, func, axis=0, subset=None, **kwargs):
+ def apply(
+ self,
+ func: Callable[..., "Styler"],
+ axis: Optional[Axis] = 0,
+ subset=None,
+ **kwargs,
+ ) -> "Styler":
"""
Apply a function column-wise, row-wise, or table-wise.
@@ -696,7 +721,7 @@ def apply(self, func, axis=0, subset=None, **kwargs):
)
return self
- def _applymap(self, func, subset=None, **kwargs):
+ def _applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
@@ -705,7 +730,7 @@ def _applymap(self, func, subset=None, **kwargs):
self._update_ctx(result)
return self
- def applymap(self, func, subset=None, **kwargs):
+ def applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
"""
Apply a function elementwise.
@@ -734,7 +759,14 @@ def applymap(self, func, subset=None, **kwargs):
)
return self
- def where(self, cond, value, other=None, subset=None, **kwargs):
+ def where(
+ self,
+ cond: Callable,
+ value: str,
+ other: Optional[str] = None,
+ subset=None,
+ **kwargs,
+ ) -> "Styler":
"""
Apply a function elementwise.
@@ -773,7 +805,7 @@ def where(self, cond, value, other=None, subset=None, **kwargs):
lambda val: value if cond(val) else other, subset=subset, **kwargs
)
- def set_precision(self, precision):
+ def set_precision(self, precision: int) -> "Styler":
"""
Set the precision used to render.
@@ -788,7 +820,7 @@ def set_precision(self, precision):
self.precision = precision
return self
- def set_table_attributes(self, attributes):
+ def set_table_attributes(self, attributes: str) -> "Styler":
"""
Set the table attributes.
@@ -812,7 +844,7 @@ def set_table_attributes(self, attributes):
self.table_attributes = attributes
return self
- def export(self):
+ def export(self) -> List[Tuple[Callable, Tuple, Dict]]:
"""
Export the styles to applied to the current Styler.
@@ -828,7 +860,7 @@ def export(self):
"""
return self._todo
- def use(self, styles):
+ def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> "Styler":
"""
Set the styles on the current Styler.
@@ -850,7 +882,7 @@ def use(self, styles):
self._todo.extend(styles)
return self
- def set_uuid(self, uuid):
+ def set_uuid(self, uuid: str) -> "Styler":
"""
Set the uuid for a Styler.
@@ -865,7 +897,7 @@ def set_uuid(self, uuid):
self.uuid = uuid
return self
- def set_caption(self, caption):
+ def set_caption(self, caption: str) -> "Styler":
"""
Set the caption on a Styler.
@@ -880,7 +912,7 @@ def set_caption(self, caption):
self.caption = caption
return self
- def set_table_styles(self, table_styles):
+ def set_table_styles(self, table_styles) -> "Styler":
"""
Set the table styles on a Styler.
@@ -927,7 +959,7 @@ def set_na_rep(self, na_rep: str) -> "Styler":
self.na_rep = na_rep
return self
- def hide_index(self):
+ def hide_index(self) -> "Styler":
"""
Hide any indices from rendering.
@@ -940,7 +972,7 @@ def hide_index(self):
self.hidden_index = True
return self
- def hide_columns(self, subset):
+ def hide_columns(self, subset) -> "Styler":
"""
Hide columns from rendering.
@@ -966,10 +998,10 @@ def hide_columns(self, subset):
# -----------------------------------------------------------------------
@staticmethod
- def _highlight_null(v, null_color):
+ def _highlight_null(v, null_color: str) -> str:
return f"background-color: {null_color}" if pd.isna(v) else ""
- def highlight_null(self, null_color="red"):
+ def highlight_null(self, null_color: str = "red") -> "Styler":
"""
Shade the background ``null_color`` for missing values.
@@ -987,14 +1019,14 @@ def highlight_null(self, null_color="red"):
def background_gradient(
self,
cmap="PuBu",
- low=0,
- high=0,
- axis=0,
+ low: float = 0,
+ high: float = 0,
+ axis: Optional[Axis] = 0,
subset=None,
- text_color_threshold=0.408,
+ text_color_threshold: float = 0.408,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
- ):
+ ) -> "Styler":
"""
Color the background in a gradient style.
@@ -1069,9 +1101,9 @@ def background_gradient(
def _background_gradient(
s,
cmap="PuBu",
- low=0,
- high=0,
- text_color_threshold=0.408,
+ low: float = 0,
+ high: float = 0,
+ text_color_threshold: float = 0.408,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
):
@@ -1095,7 +1127,7 @@ def _background_gradient(
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.to_numpy(dtype=float)))
- def relative_luminance(rgba):
+ def relative_luminance(rgba) -> float:
"""
Calculate relative luminance of a color.
@@ -1117,7 +1149,7 @@ def relative_luminance(rgba):
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
- def css(rgba):
+ def css(rgba) -> str:
dark = relative_luminance(rgba) < text_color_threshold
text_color = "#f1f1f1" if dark else "#000000"
return f"background-color: {colors.rgb2hex(rgba)};color: {text_color};"
@@ -1131,7 +1163,7 @@ def css(rgba):
columns=s.columns,
)
- def set_properties(self, subset=None, **kwargs):
+ def set_properties(self, subset=None, **kwargs) -> "Styler":
"""
Method to set one or more non-data dependent properties or each cell.
@@ -1157,7 +1189,14 @@ def set_properties(self, subset=None, **kwargs):
return self.applymap(f, subset=subset)
@staticmethod
- def _bar(s, align, colors, width=100, vmin=None, vmax=None):
+ def _bar(
+ s,
+ align: str,
+ colors: List[str],
+ width: float = 100,
+ vmin: Optional[float] = None,
+ vmax: Optional[float] = None,
+ ):
"""
Draw bar chart in dataframe cells.
"""
@@ -1175,7 +1214,7 @@ def _bar(s, align, colors, width=100, vmin=None, vmax=None):
normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
- def css_bar(start, end, color):
+ def css_bar(start: float, end: float, color: str) -> str:
"""
Generate CSS code to draw a bar from start to end.
"""
@@ -1212,13 +1251,13 @@ def css(x):
def bar(
self,
subset=None,
- axis=0,
+ axis: Optional[Axis] = 0,
color="#d65f5f",
- width=100,
- align="left",
- vmin=None,
- vmax=None,
- ):
+ width: float = 100,
+ align: str = "left",
+ vmin: Optional[float] = None,
+ vmax: Optional[float] = None,
+ ) -> "Styler":
"""
Draw bar chart in the cell backgrounds.
@@ -1293,7 +1332,9 @@ def bar(
return self
- def highlight_max(self, subset=None, color="yellow", axis=0):
+ def highlight_max(
+ self, subset=None, color: str = "yellow", axis: Optional[Axis] = 0
+ ) -> "Styler":
"""
Highlight the maximum by shading the background.
@@ -1313,7 +1354,9 @@ def highlight_max(self, subset=None, color="yellow", axis=0):
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
- def highlight_min(self, subset=None, color="yellow", axis=0):
+ def highlight_min(
+ self, subset=None, color: str = "yellow", axis: Optional[Axis] = 0
+ ) -> "Styler":
"""
Highlight the minimum by shading the background.
@@ -1335,7 +1378,13 @@ def highlight_min(self, subset=None, color="yellow", axis=0):
subset=subset, color=color, axis=axis, max_=False
)
- def _highlight_handler(self, subset=None, color="yellow", axis=None, max_=True):
+ def _highlight_handler(
+ self,
+ subset=None,
+ color: str = "yellow",
+ axis: Optional[Axis] = None,
+ max_: bool = True,
+ ) -> "Styler":
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(
self._highlight_extrema, color=color, axis=axis, subset=subset, max_=max_
@@ -1343,7 +1392,9 @@ def _highlight_handler(self, subset=None, color="yellow", axis=None, max_=True):
return self
@staticmethod
- def _highlight_extrema(data, color="yellow", max_=True):
+ def _highlight_extrema(
+ data: FrameOrSeries, color: str = "yellow", max_: bool = True
+ ):
"""
Highlight the min or max in a Series or DataFrame.
"""
@@ -1388,7 +1439,7 @@ class MyStyler(cls):
return MyStyler
- def pipe(self, func, *args, **kwargs):
+ def pipe(self, func: Callable, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
@@ -1460,7 +1511,7 @@ def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
-def _is_visible(idx_row, idx_col, lengths):
+def _is_visible(idx_row, idx_col, lengths) -> bool:
"""
Index -> {(idx_row, idx_col): bool}).
"""
@@ -1510,7 +1561,9 @@ def _get_level_lengths(index, hidden_elements=None):
return non_zero_lengths
-def _maybe_wrap_formatter(formatter, na_rep: Optional[str]):
+def _maybe_wrap_formatter(
+ formatter: Union[Callable, str], na_rep: Optional[str]
+) -> Callable:
if isinstance(formatter, str):
formatter_func = lambda x: formatter.format(x)
elif callable(formatter):
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30403 | 2019-12-22T13:19:42Z | 2020-01-20T17:31:09Z | 2020-01-20T17:31:09Z | 2020-01-20T20:03:41Z |
ENH: Add ignore_index for df.sort_values and series.sort_values | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 1253788d7ff27..96f35e25331c0 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -207,6 +207,7 @@ Other enhancements
- The ``partition_cols`` argument in :meth:`DataFrame.to_parquet` now accepts a string (:issue:`27117`)
- :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. (:issue: `30270`)
- DataFrame constructor preserve `ExtensionArray` dtype with `ExtensionArray` (:issue:`11363`)
+- :meth:`DataFrame.sort_values` and :meth:`Series.sort_values` have gained ``ignore_index`` keyword to be able to reset index after sorting (:issue:`30114`)
Build Changes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 51330bfc55dc3..27c60aa03e590 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4704,6 +4704,7 @@ def sort_values(
inplace=False,
kind="quicksort",
na_position="last",
+ ignore_index=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
@@ -4737,6 +4738,9 @@ def sort_values(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
+ if ignore_index:
+ new_data.axes[1] = ibase.default_index(len(indexer))
+
if inplace:
return self._update_inplace(new_data)
else:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bea246c3f1b98..b0cf652d91df4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4087,6 +4087,7 @@ def sort_values(
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
+ ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
@@ -4109,6 +4110,10 @@ def sort_values(
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
+ ignore_index : bool, default False
+ If True, the resulting axis will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 1.0.0
Returns
-------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 54c163330e6ee..1204676ed0c8a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2693,6 +2693,7 @@ def sort_values(
inplace=False,
kind="quicksort",
na_position="last",
+ ignore_index=False,
):
"""
Sort by the values.
@@ -2715,6 +2716,10 @@ def sort_values(
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
+ ignore_index : bool, default False
+ If True, the resulting axis will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 1.0.0
Returns
-------
@@ -2820,7 +2825,7 @@ def _try_kind_sort(arr):
return arr.argsort(kind="quicksort")
arr = self._values
- sortedIdx = np.empty(len(self), dtype=np.int32)
+ sorted_index = np.empty(len(self), dtype=np.int32)
bad = isna(arr)
@@ -2844,16 +2849,19 @@ def _try_kind_sort(arr):
if na_position == "last":
n = good.sum()
- sortedIdx[:n] = idx[good][argsorted]
- sortedIdx[n:] = idx[bad]
+ sorted_index[:n] = idx[good][argsorted]
+ sorted_index[n:] = idx[bad]
elif na_position == "first":
n = bad.sum()
- sortedIdx[n:] = idx[good][argsorted]
- sortedIdx[:n] = idx[bad]
+ sorted_index[n:] = idx[good][argsorted]
+ sorted_index[:n] = idx[bad]
else:
raise ValueError(f"invalid na_position: {na_position}")
- result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
+ result = self._constructor(arr[sorted_index], index=self.index[sorted_index])
+
+ if ignore_index:
+ result.index = ibase.default_index(len(sorted_index))
if inplace:
self._update_inplace(result)
diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py
index 540bed452d9e9..e733c01e01740 100644
--- a/pandas/tests/frame/methods/test_sort_values.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -460,3 +460,45 @@ def test_sort_values_na_position_with_categories_raises(self):
with pytest.raises(ValueError):
df.sort_values(by="c", ascending=False, na_position="bad_position")
+
+ @pytest.mark.parametrize(
+ "original_dict, sorted_dict, ignore_index, output_index",
+ [
+ ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
+ ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
+ (
+ {"A": [1, 2, 3], "B": [2, 3, 4]},
+ {"A": [3, 2, 1], "B": [4, 3, 2]},
+ True,
+ [0, 1, 2],
+ ),
+ (
+ {"A": [1, 2, 3], "B": [2, 3, 4]},
+ {"A": [3, 2, 1], "B": [4, 3, 2]},
+ False,
+ [2, 1, 0],
+ ),
+ ],
+ )
+ def test_sort_values_ignore_index(
+ self, original_dict, sorted_dict, ignore_index, output_index
+ ):
+ # GH 30114
+ df = DataFrame(original_dict)
+ expected = DataFrame(sorted_dict, index=output_index)
+
+ # Test when inplace is False
+ sorted_df = df.sort_values("A", ascending=False, ignore_index=ignore_index)
+ tm.assert_frame_equal(sorted_df, expected)
+
+ tm.assert_frame_equal(df, DataFrame(original_dict))
+
+ # Test when inplace is True
+ copied_df = df.copy()
+
+ copied_df.sort_values(
+ "A", ascending=False, ignore_index=ignore_index, inplace=True
+ )
+ tm.assert_frame_equal(copied_df, expected)
+
+ tm.assert_frame_equal(df, DataFrame(original_dict))
diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py
index ec3b8385e79e7..2cea6f061de76 100644
--- a/pandas/tests/series/methods/test_sort_values.py
+++ b/pandas/tests/series/methods/test_sort_values.py
@@ -156,3 +156,30 @@ def test_sort_values_categorical(self):
result = df.sort_values(by=["grade", "id"])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "original_list, sorted_list, ignore_index, output_index",
+ [
+ ([2, 3, 6, 1], [6, 3, 2, 1], True, [0, 1, 2, 3]),
+ ([2, 3, 6, 1], [6, 3, 2, 1], False, [2, 1, 0, 3]),
+ ],
+ )
+ def test_sort_values_ignore_index(
+ self, original_list, sorted_list, ignore_index, output_index
+ ):
+ # GH 30114
+ sr = Series(original_list)
+ expected = Series(sorted_list, index=output_index)
+
+ # Test when inplace is False
+ sorted_sr = sr.sort_values(ascending=False, ignore_index=ignore_index)
+ tm.assert_series_equal(sorted_sr, expected)
+
+ tm.assert_series_equal(sr, Series(original_list))
+
+ # Test when inplace is True
+ copied_sr = sr.copy()
+ copied_sr.sort_values(ascending=False, ignore_index=ignore_index, inplace=True)
+ tm.assert_series_equal(copied_sr, expected)
+
+ tm.assert_series_equal(sr, Series(original_list))
| - [x] xref #30114
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is the first step for closing the PR #30114, I will come up with another PR for `drop_duplicates` | https://api.github.com/repos/pandas-dev/pandas/pulls/30402 | 2019-12-22T12:14:23Z | 2019-12-27T16:12:23Z | 2019-12-27T16:12:23Z | 2019-12-27T16:12:27Z |
TYP: Type hints in pandas/io/formats/excel.py | diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 18340bc702378..9b0f100c1b041 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -4,6 +4,7 @@
from functools import reduce
import itertools
import re
+from typing import Callable, Dict, List, Optional, Sequence, Union
import warnings
import numpy as np
@@ -25,7 +26,9 @@ class ExcelCell:
__fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
__slots__ = __fields__
- def __init__(self, row, col, val, style=None, mergestart=None, mergeend=None):
+ def __init__(
+ self, row: int, col: int, val, style=None, mergestart=None, mergeend=None
+ ):
self.row = row
self.col = col
self.val = val
@@ -56,7 +59,7 @@ class CSSToExcelConverter:
# instancemethods so that users can easily experiment with extensions
# without monkey-patching.
- def __init__(self, inherited=None):
+ def __init__(self, inherited: Optional[str] = None):
if inherited is not None:
inherited = self.compute_css(inherited)
@@ -64,7 +67,7 @@ def __init__(self, inherited=None):
compute_css = CSSResolver()
- def __call__(self, declarations_str: str):
+ def __call__(self, declarations_str: str) -> Dict[str, Dict[str, str]]:
"""
Convert CSS declarations to ExcelWriter style.
@@ -84,7 +87,7 @@ def __call__(self, declarations_str: str):
properties = self.compute_css(declarations_str, self.inherited)
return self.build_xlstyle(properties)
- def build_xlstyle(self, props):
+ def build_xlstyle(self, props: Dict[str, str]) -> Dict[str, Dict[str, str]]:
out = {
"alignment": self.build_alignment(props),
"border": self.build_border(props),
@@ -95,7 +98,7 @@ def build_xlstyle(self, props):
# TODO: handle cell width and height: needs support in pandas.io.excel
- def remove_none(d):
+ def remove_none(d: Dict[str, str]) -> None:
"""Remove key where value is None, through nested dicts"""
for k, v in list(d.items()):
if v is None:
@@ -118,7 +121,7 @@ def remove_none(d):
# OpenXML also has 'justify', 'distributed'
}
- def build_alignment(self, props):
+ def build_alignment(self, props) -> Dict[str, Optional[Union[bool, str]]]:
# TODO: text-indent, padding-left -> alignment.indent
return {
"horizontal": props.get("text-align"),
@@ -130,7 +133,7 @@ def build_alignment(self, props):
),
}
- def build_border(self, props):
+ def build_border(self, props: Dict) -> Dict[str, Dict[str, str]]:
return {
side: {
"style": self._border_style(
@@ -142,7 +145,7 @@ def build_border(self, props):
for side in ["top", "right", "bottom", "left"]
}
- def _border_style(self, style, width):
+ def _border_style(self, style: Optional[str], width):
# convert styles and widths to openxml, one of:
# 'dashDot'
# 'dashDotDot'
@@ -191,7 +194,7 @@ def _border_style(self, style, width):
return "dashed"
return "mediumDashed"
- def build_fill(self, props):
+ def build_fill(self, props: Dict[str, str]):
# TODO: perhaps allow for special properties
# -excel-pattern-bgcolor and -excel-pattern-type
fill_color = props.get("background-color")
@@ -215,7 +218,7 @@ def build_fill(self, props):
}
ITALIC_MAP = {"normal": False, "italic": True, "oblique": True}
- def build_font(self, props):
+ def build_font(self, props) -> Dict[str, Optional[Union[bool, int, str]]]:
size = props.get("font-size")
if size is not None:
assert size.endswith("pt")
@@ -311,7 +314,7 @@ def build_font(self, props):
"white": "FFFFFF",
}
- def color_to_excel(self, val):
+ def color_to_excel(self, val: Optional[str]):
if val is None:
return None
if val.startswith("#") and len(val) == 7:
@@ -323,7 +326,7 @@ def color_to_excel(self, val):
except KeyError:
warnings.warn(f"Unhandled color format: {repr(val)}", CSSWarning)
- def build_number_format(self, props):
+ def build_number_format(self, props: Dict) -> Dict[str, Optional[str]]:
return {"format_code": props.get("number-format")}
@@ -366,15 +369,15 @@ class ExcelFormatter:
def __init__(
self,
df,
- na_rep="",
- float_format=None,
- cols=None,
- header=True,
- index=True,
- index_label=None,
- merge_cells=False,
- inf_rep="inf",
- style_converter=None,
+ na_rep: str = "",
+ float_format: Optional[str] = None,
+ cols: Optional[Sequence] = None,
+ header: Union[bool, List[str]] = True,
+ index: bool = True,
+ index_label: Union[str, Sequence, None] = None,
+ merge_cells: bool = False,
+ inf_rep: str = "inf",
+ style_converter: Optional[Callable] = None,
):
self.rowcounter = 0
self.na_rep = na_rep
@@ -442,10 +445,8 @@ def _format_header_mi(self):
if self.columns.nlevels > 1:
if not self.index:
raise NotImplementedError(
- "Writing to Excel with MultiIndex"
- " columns and no index "
- "('index'=False) is not yet "
- "implemented."
+ "Writing to Excel with MultiIndex columns and no "
+ "index ('index'=False) is not yet implemented."
)
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
@@ -540,7 +541,6 @@ def _format_header(self):
return itertools.chain(gen, gen2)
def _format_body(self):
-
if isinstance(self.df.index, ABCMultiIndex):
return self._format_hierarchical_rows()
else:
@@ -716,8 +716,7 @@ def write(
num_rows, num_cols = self.df.shape
if num_rows > self.max_rows or num_cols > self.max_cols:
raise ValueError(
- "This sheet is too large! Your sheet size is: "
- f"{num_rows}, {num_cols} "
+ f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
f"Max sheet size is: {self.max_rows}, {self.max_cols}"
)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30400 | 2019-12-22T11:56:16Z | 2020-01-02T01:41:11Z | 2020-01-02T01:41:11Z | 2020-01-02T01:44:15Z |
TYP: Typing hints in pandas/io/formats/{css,csvs}.py | diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index 583dd49d4c66a..b40d2a57b8106 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -1,4 +1,5 @@
-"""Utilities for interpreting CSS from Stylers for formatting non-HTML outputs
+"""
+Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
"""
import re
@@ -6,13 +7,15 @@
class CSSWarning(UserWarning):
- """This CSS syntax cannot currently be parsed"""
+ """
+ This CSS syntax cannot currently be parsed.
+ """
pass
def _side_expander(prop_fmt: str):
- def expand(self, prop, value):
+ def expand(self, prop, value: str):
tokens = value.split()
try:
mapping = self.SIDE_SHORTHANDS[len(tokens)]
@@ -28,12 +31,13 @@ def expand(self, prop, value):
class CSSResolver:
- """A callable for parsing and resolving CSS to atomic properties
-
+ """
+ A callable for parsing and resolving CSS to atomic properties.
"""
def __call__(self, declarations_str, inherited=None):
- """ the given declarations to atomic properties
+ """
+ The given declarations to atomic properties.
Parameters
----------
@@ -46,8 +50,8 @@ def __call__(self, declarations_str, inherited=None):
Returns
-------
- props : dict
- Atomic CSS 2.2 properties
+ dict
+ Atomic CSS 2.2 properties.
Examples
--------
@@ -69,7 +73,6 @@ def __call__(self, declarations_str, inherited=None):
('font-size', '24pt'),
('font-weight', 'bold')]
"""
-
props = dict(self.atomize(self.parse(declarations_str)))
if inherited is None:
inherited = {}
@@ -235,10 +238,15 @@ def atomize(self, declarations):
expand_margin = _side_expander("margin-{:s}")
expand_padding = _side_expander("padding-{:s}")
- def parse(self, declarations_str):
- """Generates (prop, value) pairs from declarations
+ def parse(self, declarations_str: str):
+ """
+ Generates (prop, value) pairs from declarations.
In a future version may generate parsed tokens from tinycss/tinycss2
+
+ Parameters
+ ----------
+ declarations_str : str
"""
for decl in declarations_str.split(";"):
if not decl.strip():
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 72ba1a892cb8f..0d581f30e50e7 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -5,13 +5,14 @@
import csv as csvlib
from io import StringIO
import os
-from typing import List
+from typing import Hashable, List, Mapping, Optional, Sequence, Union
import warnings
from zipfile import ZipFile
import numpy as np
from pandas._libs import writers as libwriters
+from pandas._typing import FilePathOrBuffer
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
@@ -33,27 +34,26 @@ class CSVFormatter:
def __init__(
self,
obj,
- path_or_buf=None,
- sep=",",
- na_rep="",
- float_format=None,
+ path_or_buf: Optional[FilePathOrBuffer[str]] = None,
+ sep: str = ",",
+ na_rep: str = "",
+ float_format: Optional[str] = None,
cols=None,
- header=True,
- index=True,
- index_label=None,
- mode="w",
- encoding=None,
- compression="infer",
- quoting=None,
+ header: Union[bool, Sequence[Hashable]] = True,
+ index: bool = True,
+ index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None,
+ mode: str = "w",
+ encoding: Optional[str] = None,
+ compression: Union[str, Mapping[str, str], None] = "infer",
+ quoting: Optional[int] = None,
line_terminator="\n",
- chunksize=None,
+ chunksize: Optional[int] = None,
quotechar='"',
- date_format=None,
- doublequote=True,
- escapechar=None,
+ date_format: Optional[str] = None,
+ doublequote: bool = True,
+ escapechar: Optional[str] = None,
decimal=".",
):
-
self.obj = obj
if path_or_buf is None:
@@ -154,14 +154,17 @@ def __init__(
if not index:
self.nlevels = 0
- def save(self):
+ def save(self) -> None:
"""
- Create the writer & save
+ Create the writer & save.
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, "write"):
- msg = "compression has no effect when passing file-like object as input."
- warnings.warn(msg, RuntimeWarning, stacklevel=2)
+ warnings.warn(
+ "compression has no effect when passing file-like object as input.",
+ RuntimeWarning,
+ stacklevel=2,
+ )
# when zip compression is called.
is_zip = isinstance(self.path_or_buf, ZipFile) or (
@@ -223,7 +226,6 @@ def save(self):
_fh.close()
def _save_header(self):
-
writer = self.writer
obj = self.obj
index_label = self.index_label
@@ -303,8 +305,7 @@ def _save_header(self):
encoded_labels.extend([""] * len(columns))
writer.writerow(encoded_labels)
- def _save(self):
-
+ def _save(self) -> None:
self._save_header()
nrows = len(self.data_index)
@@ -321,8 +322,7 @@ def _save(self):
self._save_chunk(start_i, end_i)
- def _save_chunk(self, start_i: int, end_i: int):
-
+ def _save_chunk(self, start_i: int, end_i: int) -> None:
data_index = self.data_index
# create the data for a chunk
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30398 | 2019-12-22T10:53:51Z | 2019-12-27T16:02:51Z | 2019-12-27T16:02:50Z | 2019-12-28T17:29:30Z |
STY: Underscores for long numbers | diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index a44f374264f09..86a9d053730b8 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -336,11 +336,22 @@ class Timestamp(_Timestamp):
"""
return cls(datetime.combine(date, time))
- def __new__(cls, object ts_input=_no_input,
- object freq=None, tz=None, unit=None,
- year=None, month=None, day=None,
- hour=None, minute=None, second=None, microsecond=None,
- nanosecond=None, tzinfo=None):
+ def __new__(
+ cls,
+ object ts_input=_no_input,
+ object freq=None,
+ tz=None,
+ unit=None,
+ year=None,
+ month=None,
+ day=None,
+ hour=None,
+ minute=None,
+ second=None,
+ microsecond=None,
+ nanosecond=None,
+ tzinfo=None
+ ):
# The parameter list folds together legacy parameter names (the first
# four) and positional and keyword parameter names from pydatetime.
#
@@ -401,8 +412,8 @@ class Timestamp(_Timestamp):
freq = None
if getattr(ts_input, 'tzinfo', None) is not None and tz is not None:
- raise ValueError("Cannot pass a datetime or Timestamp with tzinfo with the"
- " tz parameter. Use tz_convert instead.")
+ raise ValueError("Cannot pass a datetime or Timestamp with tzinfo with "
+ "the tz parameter. Use tz_convert instead.")
ts = convert_to_tsobject(ts_input, tz, unit, 0, 0, nanosecond or 0)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 512a83ed304d1..25609cb852ed4 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -201,17 +201,17 @@ class TestTimestampConstructors:
def test_constructor(self):
base_str = "2014-07-01 09:00"
base_dt = datetime(2014, 7, 1, 9)
- base_expected = 1404205200000000000
+ base_expected = 1_404_205_200_000_000_000
# confirm base representation is correct
- assert calendar.timegm(base_dt.timetuple()) * 1000000000 == base_expected
+ assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
tests = [
(base_str, base_dt, base_expected),
(
"2014-07-01 10:00",
datetime(2014, 7, 1, 10),
- base_expected + 3600 * 1000000000,
+ base_expected + 3600 * 1_000_000_000,
),
(
"2014-07-01 09:00:00.000008000",
@@ -250,7 +250,7 @@ def test_constructor(self):
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
- expected_tz = expected - offset * 3600 * 1000000000
+ expected_tz = expected - offset * 3600 * 1_000_000_000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
@@ -264,7 +264,7 @@ def test_constructor(self):
result = Timestamp(result).tz_convert("UTC")
else:
result = Timestamp(result, tz="UTC")
- expected_utc = expected - offset * 3600 * 1000000000
+ expected_utc = expected - offset * 3600 * 1_000_000_000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
@@ -272,14 +272,14 @@ def test_constructor_with_stringoffset(self):
# GH 7833
base_str = "2014-07-01 11:00:00+02:00"
base_dt = datetime(2014, 7, 1, 9)
- base_expected = 1404205200000000000
+ base_expected = 1_404_205_200_000_000_000
# confirm base representation is correct
- assert calendar.timegm(base_dt.timetuple()) * 1000000000 == base_expected
+ assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
tests = [
(base_str, base_expected),
- ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1000000000),
+ ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000),
("2014-07-01 11:00:00.000008000+02:00", base_expected + 8000),
("2014-07-01 11:00:00.000000005+02:00", base_expected + 5),
]
@@ -725,7 +725,7 @@ def test_utc_z_designator(self):
assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is utc
def test_asm8(self):
- np.random.seed(7960929)
+ np.random.seed(7_960_929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
@@ -786,7 +786,7 @@ def compare(x, y):
)
def test_basics_nanos(self):
- val = np.int64(946684800000000000).view("M8[ns]")
+ val = np.int64(946_684_800_000_000_000).view("M8[ns]")
stamp = Timestamp(val.view("i8") + 500)
assert stamp.year == 2000
assert stamp.month == 1
@@ -794,7 +794,7 @@ def test_basics_nanos(self):
assert stamp.nanosecond == 500
# GH 14415
- val = np.iinfo(np.int64).min + 80000000000000
+ val = np.iinfo(np.int64).min + 80_000_000_000_000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
@@ -807,8 +807,8 @@ def test_basics_nanos(self):
[
[946688461000000000, {}],
[946688461000000000 / 1000, dict(unit="us")],
- [946688461000000000 / 1000000, dict(unit="ms")],
- [946688461000000000 / 1000000000, dict(unit="s")],
+ [946688461000000000 / 1_000_000, dict(unit="ms")],
+ [946688461000000000 / 1_000_000_000, dict(unit="s")],
[10957, dict(unit="D", h=0)],
[
(946688461000000000 + 500000) / 1000000000,
@@ -852,24 +852,24 @@ def test_roundtrip(self):
base = Timestamp("20140101 00:00:00")
result = Timestamp(base.value + Timedelta("5ms").value)
- assert result == Timestamp(str(base) + ".005000")
+ assert result == Timestamp(f"{base}.005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta("5us").value)
- assert result == Timestamp(str(base) + ".000005")
+ assert result == Timestamp(f"{base}.000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta("5ns").value)
- assert result == Timestamp(str(base) + ".000000005")
+ assert result == Timestamp(f"{base}.000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta("6ms 5us").value)
- assert result == Timestamp(str(base) + ".006005")
+ assert result == Timestamp(f"{base}.006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta("200ms 5us").value)
- assert result == Timestamp(str(base) + ".200005")
+ assert result == Timestamp(f"{base}.200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
@@ -890,12 +890,12 @@ def test_nanosecond_string_parsing(self):
ts = Timestamp("2013-05-01 07:15:45.123456789")
# GH 7878
expected_repr = "2013-05-01 07:15:45.123456789"
- expected_value = 1367392545123456789
+ expected_value = 1_367_392_545_123_456_789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
- assert ts.value == expected_value - 9 * 3600 * 1000000000
+ assert ts.value == expected_value - 9 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
@@ -903,7 +903,7 @@ def test_nanosecond_string_parsing(self):
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
- assert ts.value == expected_value + 4 * 3600 * 1000000000
+ assert ts.value == expected_value + 4 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
# GH 10041
@@ -913,7 +913,7 @@ def test_nanosecond_string_parsing(self):
def test_nanosecond_timestamp(self):
# GH 7610
- expected = 1293840000000000005
+ expected = 1_293_840_000_000_000_005
t = Timestamp("2011-01-01") + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
@@ -929,7 +929,7 @@ def test_nanosecond_timestamp(self):
assert t.value == expected
assert t.nanosecond == 5
- expected = 1293840000000000010
+ expected = 1_293_840_000_000_000_010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
@@ -949,23 +949,23 @@ def test_nanosecond_timestamp(self):
class TestTimestampToJulianDate:
def test_compare_1700(self):
r = Timestamp("1700-06-23").to_julian_date()
- assert r == 2342145.5
+ assert r == 2_342_145.5
def test_compare_2000(self):
r = Timestamp("2000-04-12").to_julian_date()
- assert r == 2451646.5
+ assert r == 2_451_646.5
def test_compare_2100(self):
r = Timestamp("2100-08-12").to_julian_date()
- assert r == 2488292.5
+ assert r == 2_488_292.5
def test_compare_hour01(self):
r = Timestamp("2000-08-12T01:00:00").to_julian_date()
- assert r == 2451768.5416666666666666
+ assert r == 2_451_768.5416666666666666
def test_compare_hour13(self):
r = Timestamp("2000-08-12T13:00:00").to_julian_date()
- assert r == 2451769.0416666666666666
+ assert r == 2_451_769.0416666666666666
class TestTimestampConversion:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30397 | 2019-12-22T04:31:26Z | 2019-12-22T17:21:32Z | 2019-12-22T17:21:32Z | 2019-12-22T21:12:18Z |
TST: Added fromisocalendar test cases | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index df747cb9654a9..32dab3211d42a 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -227,6 +227,7 @@ Other enhancements
- Added new writer for exporting Stata dta files in version 118, ``StataWriter118``. This format supports exporting strings containing Unicode characters (:issue:`23573`)
- :meth:`Series.map` now accepts ``collections.abc.Mapping`` subclasses as a mapper (:issue:`29733`)
- The ``pandas.datetime`` class is now deprecated. Import from ``datetime`` instead (:issue:`30296`)
+- :meth:`Timestamp.fromisocalendar` is now compatible with python 3.8 and above (:issue:`28115`)
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 76a694c64e1fb..67c0f0cc33ab8 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -5,6 +5,9 @@ from cpython.object cimport (
from cpython.datetime cimport (datetime,
PyDateTime_Check, PyDelta_Check,
PyDateTime_IMPORT)
+
+from cpython.version cimport PY_MINOR_VERSION
+
PyDateTime_IMPORT
import numpy as np
@@ -19,6 +22,7 @@ from pandas._libs.tslibs.util cimport (
get_nat, is_integer_object, is_float_object, is_datetime64_object,
is_timedelta64_object)
+
# ----------------------------------------------------------------------
# Constants
nat_strings = {'NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'}
@@ -427,6 +431,10 @@ class NaTType(_NaT):
tzname = _make_error_func('tzname', datetime)
utcoffset = _make_error_func('utcoffset', datetime)
+ # "fromisocalendar" was introduced in 3.8
+ if PY_MINOR_VERSION >= 8:
+ fromisocalendar = _make_error_func('fromisocalendar', datetime)
+
# ----------------------------------------------------------------------
# The remaining methods have docstrings copy/pasted from the analogous
# Timestamp methods.
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index b1594dee9bc34..a537f000959e3 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -123,6 +123,13 @@ def test_round_nat(klass, method, freq):
"dst",
"fromordinal",
"fromtimestamp",
+ pytest.param(
+ "fromisocalendar",
+ marks=pytest.mark.skipif(
+ not compat.PY38,
+ reason="'fromisocalendar' was added in stdlib datetime in python 3.8",
+ ),
+ ),
"isocalendar",
"strftime",
"strptime",
@@ -297,6 +304,8 @@ def test_overlap_public_nat_methods(klass, expected):
# "fromisoformat" was introduced in 3.7
if klass is Timestamp and not compat.PY37:
expected.remove("fromisoformat")
+
+ # "fromisocalendar" was introduced in 3.8
if klass is Timestamp and not compat.PY38:
expected.remove("fromisocalendar")
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 18a8d4b4ad708..f1fcf46a936fd 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -14,6 +14,7 @@
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone
+import pandas.compat as compat
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
@@ -700,6 +701,19 @@ class SubDatetime(datetime):
expected = Timestamp(2000, 1, 1)
assert result == expected
+ @pytest.mark.skipif(
+ not compat.PY38,
+ reason="datetime.fromisocalendar was added in Python version 3.8",
+ )
+ def test_constructor_fromisocalendar(self):
+ # GH 30395
+ expected_timestamp = Timestamp("2000-01-03 00:00:00")
+ expected_stdlib = datetime.fromisocalendar(2000, 1, 1)
+ result = Timestamp.fromisocalendar(2000, 1, 1)
+ assert result == expected_timestamp
+ assert result == expected_stdlib
+ assert isinstance(result, Timestamp)
+
class TestTimestamp:
def test_tz(self):
| - [x] closes #28115
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30395 | 2019-12-21T21:34:17Z | 2020-01-06T13:30:42Z | 2020-01-06T13:30:41Z | 2020-01-06T13:37:27Z |
CLN: tslibs.parsing | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 598def4e1d9fa..cbe6dd6c2322d 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -188,7 +188,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
return result
-def _test_parse_iso8601(object ts):
+def _test_parse_iso8601(ts: str):
"""
TESTING ONLY: Parse string into Timestamp using iso8601 parser. Used
only for testing, actual construction uses `convert_str_to_tsobject`
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index c5315219b8422..2988d7bae9a5e 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -444,15 +444,15 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit,
bint dayfirst=False,
bint yearfirst=False):
"""
- Convert a string-like (bytes or unicode) input `ts`, along with optional
- timezone object `tz` to a _TSObject.
+ Convert a string input `ts`, along with optional timezone object`tz`
+ to a _TSObject.
The optional arguments `dayfirst` and `yearfirst` are passed to the
dateutil parser.
Parameters
----------
- ts : bytes or unicode
+ ts : str
Value to be converted to _TSObject
tz : tzinfo or None
timezone for the timezone-aware output
diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd
index 4e7949e55c836..6ec67ce250505 100644
--- a/pandas/_libs/tslibs/frequencies.pxd
+++ b/pandas/_libs/tslibs/frequencies.pxd
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-cpdef object get_rule_month(object source, object default=*)
+cpdef str get_rule_month(object source, str default=*)
cpdef get_freq_code(freqstr)
cpdef object get_freq(object freq)
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
index 660f4ddcec736..d60f5cfd3f8c1 100644
--- a/pandas/_libs/tslibs/frequencies.pyx
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -485,18 +485,18 @@ cdef bint _is_weekly(str rule):
# ----------------------------------------------------------------------
-cpdef object get_rule_month(object source, object default='DEC'):
+cpdef str get_rule_month(object source, str default="DEC"):
"""
Return starting month of given freq, default is December.
Parameters
----------
source : object
- default : object (default "DEC")
+ default : str, default "DEC"
Returns
-------
- rule_month: object (usually string)
+ rule_month: str
Examples
--------
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index 020bcdf0a7b15..ebedee79405e5 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -72,6 +72,6 @@ cdef npy_datetime get_datetime64_value(object obj) nogil
cdef npy_timedelta get_timedelta64_value(object obj) nogil
cdef NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil
-cdef int _string_to_dts(object val, npy_datetimestruct* dts,
+cdef int _string_to_dts(str val, npy_datetimestruct* dts,
int* out_local, int* out_tzoffset,
bint want_exc) except? -1
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index b9406074bb130..b59a1101e0bf7 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -167,7 +167,7 @@ cdef inline int64_t pydate_to_dt64(date val, npy_datetimestruct *dts):
return dtstruct_to_dt64(dts)
-cdef inline int _string_to_dts(object val, npy_datetimestruct* dts,
+cdef inline int _string_to_dts(str val, npy_datetimestruct* dts,
int* out_local, int* out_tzoffset,
bint want_exc) except? -1:
cdef:
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index ecf3e35c86d76..3705b0a41fe55 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -3,7 +3,6 @@ Parsing functions for datetime and datetime-like strings.
"""
import re
import time
-from io import StringIO
from libc.string cimport strchr
@@ -11,9 +10,8 @@ import cython
from cython import Py_ssize_t
from cpython.object cimport PyObject_Str
-from cpython.unicode cimport PyUnicode_Join
-from cpython.datetime cimport datetime, datetime_new, import_datetime
+from cpython.datetime cimport datetime, datetime_new, import_datetime, tzinfo
from cpython.version cimport PY_VERSION_HEX
import_datetime()
@@ -37,6 +35,7 @@ from pandas._config import get_option
from pandas._libs.tslibs.ccalendar import MONTH_NUMBERS
from pandas._libs.tslibs.nattype import nat_strings, NaT
from pandas._libs.tslibs.util cimport is_array, get_c_string_buf_and_size
+from pandas._libs.tslibs.frequencies cimport get_rule_month
cdef extern from "../src/headers/portable.h":
int getdigit_ascii(char c, int default) nogil
@@ -86,16 +85,15 @@ cdef inline int _parse_4digit(const char* s):
return result
-cdef inline object _parse_delimited_date(object date_string, bint dayfirst):
+cdef inline object _parse_delimited_date(str date_string, bint dayfirst):
"""
Parse special cases of dates: MM/DD/YYYY, DD/MM/YYYY, MM/YYYY.
+
At the beginning function tries to parse date in MM/DD/YYYY format, but
if month > 12 - in DD/MM/YYYY (`dayfirst == False`).
With `dayfirst == True` function makes an attempt to parse date in
DD/MM/YYYY, if an attempt is wrong - in DD/MM/YYYY
- Note
- ----
For MM/DD/YYYY, DD/MM/YYYY: delimiter can be a space or one of /-.
For MM/YYYY: delimiter can be a space or one of /-
If `date_string` can't be converted to date, then function returns
@@ -104,11 +102,13 @@ cdef inline object _parse_delimited_date(object date_string, bint dayfirst):
Parameters
----------
date_string : str
- dayfirst : bint
+ dayfirst : bool
Returns:
--------
- datetime, resolution
+ datetime or None
+ str or None
+ Describing resolution of the parsed string.
"""
cdef:
const char* buf
@@ -156,18 +156,19 @@ cdef inline object _parse_delimited_date(object date_string, bint dayfirst):
raise DateParseError(f"Invalid date specified ({month}/{day})")
-cdef inline bint does_string_look_like_time(object parse_string):
+cdef inline bint does_string_look_like_time(str parse_string):
"""
Checks whether given string is a time: it has to start either from
H:MM or from HH:MM, and hour and minute values must be valid.
Parameters
----------
- date_string : str
+ parse_string : str
Returns:
--------
- whether given string is a time
+ bool
+ Whether given string is potentially a time.
"""
cdef:
const char* buf
@@ -188,9 +189,10 @@ cdef inline bint does_string_look_like_time(object parse_string):
return 0 <= hour <= 23 and 0 <= minute <= 59
-def parse_datetime_string(date_string, freq=None, dayfirst=False,
+def parse_datetime_string(date_string: str, freq=None, dayfirst=False,
yearfirst=False, **kwargs):
- """parse datetime string, only returns datetime.
+ """
+ Parse datetime string, only returns datetime.
Also cares special handling matching time patterns.
Returns
@@ -270,16 +272,17 @@ def parse_time_string(arg: str, freq=None, dayfirst=None, yearfirst=None):
return res
-cdef parse_datetime_string_with_reso(date_string, freq=None, dayfirst=False,
+cdef parse_datetime_string_with_reso(str date_string, freq=None, dayfirst=False,
yearfirst=False):
- """parse datetime string, only returns datetime
+ """
+ Parse datetime string and try to identify its resolution.
Returns
-------
- parsed : datetime
- parsed2 : datetime/dateutil.parser._result
- reso : str
- inferred resolution
+ datetime
+ datetime/dateutil.parser._result
+ str
+ Inferred resolution of the parsed string.
Raises
------
@@ -315,18 +318,19 @@ cdef parse_datetime_string_with_reso(date_string, freq=None, dayfirst=False,
return parsed, parsed, reso
-cpdef bint _does_string_look_like_datetime(object py_string):
+cpdef bint _does_string_look_like_datetime(str py_string):
"""
Checks whether given string is a datetime: it has to start with '0' or
be greater than 1000.
Parameters
----------
- py_string: object
+ py_string: str
Returns
-------
- whether given string is a datetime
+ bool
+ Whether given string is potentially a datetime.
"""
cdef:
const char *buf
@@ -370,9 +374,6 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
# special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
assert isinstance(date_string, str)
- # len(date_string) == 0
- # should be NaT???
-
if date_string in nat_strings:
return NaT, NaT, ''
@@ -427,7 +428,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
if freq is not None:
# hack attack, #1228
try:
- mnum = MONTH_NUMBERS[_get_rule_month(freq)] + 1
+ mnum = MONTH_NUMBERS[get_rule_month(freq)] + 1
except (KeyError, ValueError):
raise DateParseError(f'Unable to retrieve month '
f'information from given '
@@ -467,21 +468,16 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
raise ValueError(f'Unable to parse {date_string}')
-cdef dateutil_parse(object timestr, object default, ignoretz=False,
+cdef dateutil_parse(str timestr, object default, ignoretz=False,
tzinfos=None, dayfirst=None, yearfirst=None):
""" lifted from dateutil to get resolution"""
cdef:
- object fobj, res, attr, ret, tzdata
+ object res, attr, ret, tzdata
object reso = None
dict repl = {}
- fobj = StringIO(str(timestr))
- res = DEFAULTPARSER._parse(fobj, dayfirst=dayfirst, yearfirst=yearfirst)
-
- # dateutil 2.2 compat
- if isinstance(res, tuple): # PyTuple_Check
- res, _ = res
+ res, _ = DEFAULTPARSER._parse(timestr, dayfirst=dayfirst, yearfirst=yearfirst)
if res is None:
raise ValueError(f"Unknown datetime string format, unable to parse: {timestr}")
@@ -507,20 +503,22 @@ cdef dateutil_parse(object timestr, object default, ignoretz=False,
ret = ret + relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
+ # Note: as of 1.0 this is not reached because
+ # we never pass tzinfos, see GH#22234
if callable(tzinfos):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
- if isinstance(tzdata, datetime.tzinfo):
- tzinfo = tzdata
+ if isinstance(tzdata, tzinfo):
+ new_tzinfo = tzdata
elif isinstance(tzdata, str):
- tzinfo = _dateutil_tzstr(tzdata)
+ new_tzinfo = _dateutil_tzstr(tzdata)
elif isinstance(tzdata, int):
- tzinfo = tzoffset(res.tzname, tzdata)
+ new_tzinfo = tzoffset(res.tzname, tzdata)
else:
raise ValueError("offset must be tzinfo subclass, "
"tz string, or int offset")
- ret = ret.replace(tzinfo=tzinfo)
+ ret = ret.replace(tzinfo=new_tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=_dateutil_tzlocal())
elif res.tzoffset == 0:
@@ -530,27 +528,6 @@ cdef dateutil_parse(object timestr, object default, ignoretz=False,
return ret, reso
-cdef object _get_rule_month(object source, object default='DEC'):
- """
- Return starting month of given freq, default is December.
-
- Example
- -------
- >>> _get_rule_month('D')
- 'DEC'
-
- >>> _get_rule_month('A-JAN')
- 'JAN'
- """
- if hasattr(source, 'freqstr'):
- source = source.freqstr
- source = source.upper()
- if '-' not in source:
- return default
- else:
- return source.split('-')[1]
-
-
# ----------------------------------------------------------------------
# Parsing for type-inference
@@ -939,14 +916,14 @@ def _concat_date_cols(tuple date_cols, bint keep_trivial_numbers=True):
Parameters
----------
- date_cols : tuple of numpy arrays
+ date_cols : tuple[ndarray]
keep_trivial_numbers : bool, default True
if True and len(date_cols) == 1, then
conversion (to string from integer/float zero) is not performed
Returns
-------
- arr_of_rows : ndarray (dtype=object)
+ arr_of_rows : ndarray[object]
Examples
--------
@@ -1004,6 +981,6 @@ def _concat_date_cols(tuple date_cols, bint keep_trivial_numbers=True):
item = PyArray_GETITEM(array, PyArray_ITER_DATA(it))
list_to_join[col_idx] = convert_to_unicode(item, False)
PyArray_ITER_NEXT(it)
- result_view[row_idx] = PyUnicode_Join(' ', list_to_join)
+ result_view[row_idx] = " ".join(list_to_join)
return result
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index a6503c00a41bb..a8dabac1527b5 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1191,12 +1191,15 @@ cdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) except? -1:
return dtstruct_to_dt64(&dts)
-def period_format(int64_t value, int freq, object fmt=None):
+cdef str period_format(int64_t value, int freq, object fmt=None):
cdef:
int freq_group
if value == NPY_NAT:
- return repr(NaT)
+ return "NaT"
+
+ if isinstance(fmt, str):
+ fmt = fmt.encode("utf-8")
if fmt is None:
freq_group = get_freq_group(freq)
@@ -1242,24 +1245,22 @@ cdef list extra_fmts = [(b"%q", b"^`AB`^"),
cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^",
"^`GH`^", "^`IJ`^", "^`KL`^"]
-cdef object _period_strftime(int64_t value, int freq, object fmt):
+cdef str _period_strftime(int64_t value, int freq, bytes fmt):
cdef:
Py_ssize_t i
npy_datetimestruct dts
char *formatted
- object pat, repl, result
+ bytes pat, brepl
list found_pat = [False] * len(extra_fmts)
int year, quarter
-
- if isinstance(fmt, unicode):
- fmt = fmt.encode('utf-8')
+ str result, repl
get_date_info(value, freq, &dts)
for i in range(len(extra_fmts)):
pat = extra_fmts[i][0]
- repl = extra_fmts[i][1]
+ brepl = extra_fmts[i][1]
if pat in fmt:
- fmt = fmt.replace(pat, repl)
+ fmt = fmt.replace(pat, brepl)
found_pat[i] = True
formatted = c_strftime(&dts, <char*>fmt)
@@ -2234,7 +2235,7 @@ cdef class _Period:
object_state = None, self.freq, self.ordinal
return (Period, object_state)
- def strftime(self, fmt):
+ def strftime(self, fmt: str) -> str:
"""
Returns the string representation of the :class:`Period`, depending
on the selected ``fmt``. ``fmt`` must be a string
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 05aac976d54db..5b37ebb42aecc 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -1097,6 +1097,8 @@ def __call__(self, x, pos=0):
return ""
else:
fmt = self.formatdict.pop(x, "")
+ if isinstance(fmt, np.bytes_):
+ fmt = fmt.decode("utf-8")
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
| - [x] closes #22234
Non-segfault-related pieces broken off from #30374. Fix a (not-reachable-but-thats-another-conversation) tzinfo check closing #22234.
De-duplicates get_rule_month in tslibs.frequencies and tslibs.parsing
pythonizes a PyUnicode_Join usage because cython does that optimization already. | https://api.github.com/repos/pandas-dev/pandas/pulls/30394 | 2019-12-21T21:32:09Z | 2019-12-23T18:32:25Z | 2019-12-23T18:32:24Z | 2019-12-23T18:38:14Z |
CI: transition powershell-> bash | diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
index 86807b4010988..536c2fdbf1353 100644
--- a/ci/azure/windows.yml
+++ b/ci/azure/windows.yml
@@ -31,7 +31,8 @@ jobs:
- bash: |
source activate pandas-dev
conda list
- ci\\incremental\\build.cmd
+ python setup.py build_ext -q -i
+ python -m pip install --no-build-isolation -e .
displayName: 'Build'
- bash: |
source activate pandas-dev
diff --git a/ci/incremental/build.cmd b/ci/incremental/build.cmd
deleted file mode 100644
index b61b59e287299..0000000000000
--- a/ci/incremental/build.cmd
+++ /dev/null
@@ -1,9 +0,0 @@
-@rem https://github.com/numba/numba/blob/master/buildscripts/incremental/build.cmd
-
-@rem Build extensions
-python setup.py build_ext -q -i
-
-@rem Install pandas
-python -m pip install --no-build-isolation -e .
-
-if %errorlevel% neq 0 exit /b %errorlevel%
| xref #26344
If this works, i think the remaining places to bashify are
- identical powerhsell commands in windows.yml and posix.yml to check for test failures
- conda.recipe/bld.bat (do we need this directory at all?)
- test.bat, test_fast.bat (do we need the bash versions of these?) | https://api.github.com/repos/pandas-dev/pandas/pulls/30393 | 2019-12-21T21:26:50Z | 2019-12-23T08:49:27Z | 2019-12-23T08:49:26Z | 2019-12-24T01:17:51Z |
CI: clean environment.yml, comment that req-dev is generated | diff --git a/environment.yml b/environment.yml
index 2b171d097a693..f930458d0a855 100644
--- a/environment.yml
+++ b/environment.yml
@@ -33,7 +33,8 @@ dependencies:
- nbconvert>=5.4.1
- nbsphinx
- pandoc
- # Dask and its dependencies
+
+ # Dask and its dependencies (that dont install with dask)
- dask-core
- toolz>=0.7.3
- fsspec>=0.5.1
@@ -54,6 +55,8 @@ dependencies:
- pytest>=5.0.1
- pytest-cov
- pytest-xdist>=1.21
+
+ # downstream tests
- seaborn
- statsmodels
@@ -74,22 +77,28 @@ dependencies:
- scipy>=1.1
# optional for io
- - beautifulsoup4>=4.6.0 # pandas.read_html
+ # ---------------
+ # pd.read_html
+ - beautifulsoup4>=4.6.0
+ - html5lib
+ - lxml
+
+ # pd.read_excel, DataFrame.to_excel, pd.ExcelWriter, pd.ExcelFile
+ - openpyxl<=3.0.1
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ - odfpy
+
- fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet
- - html5lib # pandas.read_html
- - lxml # pandas.read_html
- - openpyxl<=3.0.1 # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- pyarrow>=0.13.1 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
+ - python-snappy # required by pyarrow
+
- pyqt>=5.9.2 # pandas.read_clipboard
- pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf
- - python-snappy # required by pyarrow
- s3fs # pandas.read_csv... when using 's3://...' path
- sqlalchemy # pandas.read_sql, DataFrame.to_sql
- xarray # DataFrame.to_xarray
- - xlrd # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- - xlsxwriter # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- - xlwt # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- - odfpy # pandas.read_excel
- pyreadstat # pandas.read_spss
- pip:
- git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5f67726a3e476..827bb809d46e4 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,3 +1,6 @@
+# This file is auto-generated from environment.yml, do not modify.
+# See that file for comments about the need/usage of each depdendency.
+
numpy>=1.15
python-dateutil>=2.6.1
pytz
@@ -48,20 +51,20 @@ matplotlib>=2.2.2
numexpr>=2.6.8
scipy>=1.1
beautifulsoup4>=4.6.0
-fastparquet>=0.3.2
html5lib
lxml
openpyxl<=3.0.1
+xlrd
+xlsxwriter
+xlwt
+odfpy
+fastparquet>=0.3.2
pyarrow>=0.13.1
+python-snappy
pyqt5>=5.9.2
tables>=3.4.2
-python-snappy
s3fs
sqlalchemy
xarray
-xlrd
-xlsxwriter
-xlwt
-odfpy
pyreadstat
git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
\ No newline at end of file
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 6f809669d917f..1d2c33aeee384 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -87,9 +87,14 @@ def main(conda_fname, pip_fname, compare=False):
elif isinstance(dep, dict) and len(dep) == 1 and "pip" in dep:
pip_deps += dep["pip"]
else:
- raise ValueError("Unexpected dependency {}".format(dep))
+ raise ValueError(f"Unexpected dependency {dep}")
- pip_content = "\n".join(pip_deps)
+ fname = os.path.split(conda_fname)[1]
+ header = (
+ f"# This file is auto-generated from {fname}, do not modify.\n"
+ "# See that file for comments about the need/usage of each depdendency.\n\n"
+ )
+ pip_content = header + "\n".join(pip_deps)
if compare:
with open(pip_fname) as pip_fd:
| There is a little more id like to do in environment.yml to put docbuild things in the same place, but holding off to keep the diff tidier. Also it appears there are some unused things in environment.yml that we can look into removing, separate PR | https://api.github.com/repos/pandas-dev/pandas/pulls/30392 | 2019-12-21T21:02:25Z | 2019-12-25T22:28:19Z | 2019-12-25T22:28:19Z | 2019-12-30T12:27:31Z |
TYP: Annotations in core/indexes/ | diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index 209f889e809c3..ac1b0ab766a03 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -144,8 +144,9 @@ def validate_indices(indices: np.ndarray, n: int) -> None:
if len(indices):
min_idx = indices.min()
if min_idx < -1:
- msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
- raise ValueError(msg)
+ raise ValueError(
+ f"'indices' contains values less than allowed ({min_idx} < -1)"
+ )
max_idx = indices.max()
if max_idx >= n:
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index ae27aad3dda08..db774a03c02f8 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -26,8 +26,7 @@ class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
def __init__(self, data, orig):
if not isinstance(data, ABCSeries):
raise TypeError(
- f"cannot convert an object of type {type(data)} to a "
- "datetimelike index"
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
)
self._parent = data
@@ -91,9 +90,8 @@ def _delegate_property_get(self, name):
def _delegate_property_set(self, name, value, *args, **kwargs):
raise ValueError(
- "modifications to a property of a datetimelike "
- "object are not supported. Change values on the "
- "original."
+ "modifications to a property of a datetimelike object are not supported. "
+ "Change values on the original."
)
def _delegate_method(self, name, *args, **kwargs):
@@ -222,7 +220,7 @@ def to_pytimedelta(self):
Returns
-------
- a : numpy.ndarray
+ numpy.ndarray
Array of 1D containing data with `datetime.timedelta` type.
See Also
@@ -314,8 +312,7 @@ def __new__(cls, data):
if not isinstance(data, ABCSeries):
raise TypeError(
- f"cannot convert an object of type {type(data)} to a "
- "datetimelike index"
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
)
orig = data if is_categorical_dtype(data) else None
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index e99ae96f35315..1904456848396 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -124,7 +124,6 @@ def _get_combined_index(
-------
Index
"""
-
# TODO: handle index names!
indexes = _get_distinct_objs(indexes)
if len(indexes) == 0:
@@ -273,7 +272,6 @@ def get_consensus_names(indexes):
list
A list representing the consensus 'names' found.
"""
-
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = {tuple(i.names) for i in indexes if com.any_not_none(*i.names)}
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 5abd049b9564c..ce7a238daeca9 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -804,11 +804,10 @@ def _assert_take_fillable(
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
- msg = (
+ raise ValueError(
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
- raise ValueError(msg)
taken = algos.take(
values, indices, allow_fill=allow_fill, fill_value=na_value
)
@@ -1324,8 +1323,7 @@ def set_names(self, names, level=None, inplace=False):
raise ValueError("Level must be None for non-MultiIndex")
if level is not None and not is_list_like(level) and is_list_like(names):
- msg = "Names must be a string when a single level is provided."
- raise TypeError(msg)
+ raise TypeError("Names must be a string when a single level is provided.")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
@@ -1421,8 +1419,8 @@ def _validate_index_level(self, level):
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
- f"Too many levels: Index has only 1 level,"
- f" {level} is not a valid level number"
+ "Too many levels: Index has only 1 level, "
+ f"{level} is not a valid level number"
)
elif level > 0:
raise IndexError(
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 44478d00da9cf..d35117b8db86e 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -715,9 +715,7 @@ def _convert_list_indexer(self, keyarr, kind=None):
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
- "a list-indexer must only "
- "include values that are "
- "in the categories"
+ "a list-indexer must only include values that are in the categories"
)
return self.get_indexer(keyarr)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 50dbddec5c8b2..3bf6dce00a031 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -2,7 +2,7 @@
Base and utility classes for tseries type pandas objects.
"""
import operator
-from typing import Set
+from typing import List, Set
import numpy as np
@@ -73,7 +73,7 @@ def method(self, other):
class DatetimeIndexOpsMixin(ExtensionOpsMixin):
"""
- common ops mixin to support a unified interface datetimelike Index
+ Common ops mixin to support a unified interface datetimelike Index.
"""
_data: ExtensionArray
@@ -336,7 +336,7 @@ def _convert_tolerance(self, tolerance, target):
raise ValueError("list-like tolerance size must match target index size")
return tolerance
- def tolist(self):
+ def tolist(self) -> List:
"""
Return a list of the underlying data.
"""
@@ -661,11 +661,12 @@ def _summary(self, name=None):
Parameters
----------
name : str
- name to use in the summary representation
+ Name to use in the summary representation.
Returns
-------
- String with a summarized representation of the index
+ str
+ Summarized representation of the index.
"""
formatter = self._formatter_func
if len(self) > 0:
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 523c434cb7377..1fd962dd24656 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -45,9 +45,10 @@
def _new_DatetimeIndex(cls, d):
- """ This is called upon unpickling, rather than the default which doesn't
- have arguments and breaks __new__ """
-
+ """
+ This is called upon unpickling, rather than the default which doesn't
+ have arguments and breaks __new__
+ """
if "data" in d and not isinstance(d["data"], DatetimeIndex):
# Avoid need to verify integrity by calling simple_new directly
data = d.pop("data")
@@ -100,9 +101,9 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin):
Parameters
----------
- data : array-like (1-dimensional), optional
+ data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
- copy : bool
+ copy : bool
Make a copy of input ndarray.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
@@ -273,7 +274,7 @@ def __new__(
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None):
"""
- we require the we have a dtype compat for the values
+ We require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if isinstance(values, DatetimeArray):
@@ -345,7 +346,13 @@ def tz(self, value):
@cache_readonly
def _is_dates_only(self) -> bool:
- """Return a boolean if we are only dates (and don't have a timezone)"""
+ """
+ Return a boolean if we are only dates (and don't have a timezone)
+
+ Returns
+ -------
+ bool
+ """
from pandas.io.formats.format import _is_dates_only
return _is_dates_only(self.values) and self.tz is None
@@ -360,7 +367,9 @@ def __reduce__(self):
return _new_DatetimeIndex, (type(self), d), None
def __setstate__(self, state):
- """Necessary for making this object picklable"""
+ """
+ Necessary for making this object picklable.
+ """
if isinstance(state, dict):
super().__setstate__(state)
@@ -393,7 +402,9 @@ def __setstate__(self, state):
_unpickle_compat = __setstate__
def _convert_for_op(self, value):
- """ Convert value to be insertable to ndarray """
+ """
+ Convert value to be insertable to ndarray.
+ """
if self._has_same_tz(value):
return _to_M8(value)
raise ValueError("Passed item and index have different timezone")
@@ -461,7 +472,7 @@ def _union(self, other, sort):
def union_many(self, others):
"""
- A bit of a hack to accelerate unioning a collection of indexes
+ A bit of a hack to accelerate unioning a collection of indexes.
"""
this = self
@@ -489,7 +500,7 @@ def union_many(self, others):
this._data._dtype = dtype
return this
- def _can_fast_union(self, other):
+ def _can_fast_union(self, other) -> bool:
if not isinstance(other, DatetimeIndex):
return False
@@ -581,7 +592,7 @@ def intersection(self, other, sort=False):
Returns
-------
- y : Index or DatetimeIndex or TimedeltaIndex
+ Index or DatetimeIndex or TimedeltaIndex
"""
return super().intersection(other, sort=sort)
@@ -699,7 +710,9 @@ def snap(self, freq="S"):
# we know it conforms; skip check
return DatetimeIndex._simple_new(snapped, name=self.name, tz=self.tz, freq=freq)
- def join(self, other, how="left", level=None, return_indexers=False, sort=False):
+ def join(
+ self, other, how: str = "left", level=None, return_indexers=False, sort=False
+ ):
"""
See Index.join
"""
@@ -840,9 +853,8 @@ def _parsed_string_to_bounds(self, reso, parsed):
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
- "The index must be timezone aware "
- "when indexing with a date string with a "
- "UTC offset"
+ "The index must be timezone aware when indexing "
+ "with a date string with a UTC offset"
)
start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
@@ -851,7 +863,16 @@ def _parsed_string_to_bounds(self, reso, parsed):
end = end.tz_localize(self.tz)
return start, end
- def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
+ def _partial_date_slice(
+ self, reso: str, parsed, use_lhs: bool = True, use_rhs: bool = True
+ ):
+ """
+ Parameters
+ ----------
+ reso : str
+ use_lhs : bool, default True
+ use_rhs : bool, default True
+ """
is_monotonic = self.is_monotonic
if (
is_monotonic
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index fd8ab74ed4920..909643d50e9d7 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -35,7 +35,7 @@ def union(self, other) -> "FrozenList":
Returns
-------
- diff : FrozenList
+ FrozenList
The collection difference between self and other.
"""
if isinstance(other, tuple):
@@ -53,7 +53,7 @@ def difference(self, other) -> "FrozenList":
Returns
-------
- diff : FrozenList
+ FrozenList
The collection difference between self and other.
"""
other = set(other)
@@ -92,7 +92,9 @@ def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
- """This method will not function because object is immutable."""
+ """
+ This method will not function because object is immutable.
+ """
raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")
def __str__(self) -> str:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index f046f0d89c428..dee4c959f8c90 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -143,21 +143,19 @@ def func(intvidx_self, other, sort=False):
result = result.astype(intvidx_self.dtype)
return result
elif intvidx_self.closed != other.closed:
- msg = (
+ raise ValueError(
"can only do set operations between two IntervalIndex "
"objects that are closed on the same side"
)
- raise ValueError(msg)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
- msg = (
+ raise TypeError(
f"can only do {self.op_name} between two IntervalIndex "
"objects that have compatible dtypes"
)
- raise TypeError(msg)
return setop(intvidx_self, other, sort)
@@ -210,7 +208,13 @@ class IntervalIndex(IntervalMixin, Index):
# Constructors
def __new__(
- cls, data, closed=None, dtype=None, copy=False, name=None, verify_integrity=True
+ cls,
+ data,
+ closed=None,
+ dtype=None,
+ copy: bool = False,
+ name=None,
+ verify_integrity: bool = True,
):
if name is None and hasattr(data, "name"):
@@ -263,7 +267,9 @@ def _simple_new(cls, array, name, closed=None):
),
)
)
- def from_breaks(cls, breaks, closed="right", name=None, copy=False, dtype=None):
+ def from_breaks(
+ cls, breaks, closed: str = "right", name=None, copy: bool = False, dtype=None
+ ):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
breaks, closed=closed, copy=copy, dtype=dtype
@@ -288,7 +294,13 @@ def from_breaks(cls, breaks, closed="right", name=None, copy=False, dtype=None):
)
)
def from_arrays(
- cls, left, right, closed="right", name=None, copy=False, dtype=None
+ cls,
+ left,
+ right,
+ closed: str = "right",
+ name=None,
+ copy: bool = False,
+ dtype=None,
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
@@ -313,7 +325,9 @@ def from_arrays(
),
)
)
- def from_tuples(cls, data, closed="right", name=None, copy=False, dtype=None):
+ def from_tuples(
+ cls, data, closed: str = "right", name=None, copy: bool = False, dtype=None
+ ):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
return cls._simple_new(arr, name=name)
@@ -329,7 +343,9 @@ def _shallow_copy(self, left=None, right=None, **kwargs):
@cache_readonly
def _isnan(self):
- """Return a mask indicating if each value is NA"""
+ """
+ Return a mask indicating if each value is NA.
+ """
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@@ -351,7 +367,7 @@ def __contains__(self, key) -> bool:
Returns
-------
- boolean
+ bool
"""
if not isinstance(key, Interval):
return False
@@ -470,7 +486,9 @@ def _ndarray_values(self) -> np.ndarray:
return np.array(self._data)
def __array__(self, result=None):
- """ the array interface, return my values """
+ """
+ The array interface, return my values.
+ """
return self._ndarray_values
def __array_wrap__(self, result, context=None):
@@ -503,7 +521,9 @@ def astype(self, dtype, copy=True):
@cache_readonly
def dtype(self):
- """Return the dtype object of the underlying data"""
+ """
+ Return the dtype object of the underlying data.
+ """
return self._data.dtype
@property
@@ -551,7 +571,7 @@ def is_monotonic_decreasing(self) -> bool:
@cache_readonly
def is_unique(self):
"""
- Return True if the IntervalIndex contains unique elements, else False
+ Return True if the IntervalIndex contains unique elements, else False.
"""
left = self.left
right = self.right
@@ -708,7 +728,7 @@ def _needs_i8_conversion(self, key):
Returns
-------
- boolean
+ bool
"""
if is_interval_dtype(key) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
@@ -729,7 +749,7 @@ def _maybe_convert_i8(self, key):
Returns
-------
- key: scalar or list-like
+ scalar or list-like
The original key if no conversion occurred, int if converted scalar,
Int64Index if converted list-like.
"""
@@ -775,22 +795,21 @@ def _check_method(self, method):
return
if method in ["bfill", "backfill", "pad", "ffill", "nearest"]:
- msg = f"method {method} not yet implemented for IntervalIndex"
- raise NotImplementedError(msg)
+ raise NotImplementedError(
+ f"method {method} not yet implemented for IntervalIndex"
+ )
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
- "can only get slices from an IntervalIndex if "
- "bounds are non-overlapping and all monotonic "
- "increasing or decreasing"
+ "can only get slices from an IntervalIndex if bounds are "
+ "non-overlapping and all monotonic increasing or decreasing"
)
if isinstance(label, IntervalMixin):
- msg = "Interval objects are not currently supported"
- raise NotImplementedError(msg)
+ raise NotImplementedError("Interval objects are not currently supported")
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
@@ -850,7 +869,7 @@ def get_loc(
Returns
-------
- loc : int if unique index, slice if monotonic index, else mask
+ int if unique index, slice if monotonic index, else mask
Examples
--------
@@ -933,11 +952,10 @@ def get_indexer(
self._check_method(method)
if self.is_overlapping:
- msg = (
- "cannot handle overlapping indices; use "
- "IntervalIndex.get_indexer_non_unique"
+ raise InvalidIndexError(
+ "cannot handle overlapping indices; "
+ "use IntervalIndex.get_indexer_non_unique"
)
- raise InvalidIndexError(msg)
target_as_index = ensure_index(target)
@@ -1071,7 +1089,7 @@ def delete(self, loc):
Returns
-------
- new_index : IntervalIndex
+ IntervalIndex
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
@@ -1090,7 +1108,7 @@ def insert(self, loc, item):
Returns
-------
- new_index : IntervalIndex
+ IntervalIndex
"""
if isinstance(item, Interval):
if item.closed != self.closed:
@@ -1117,11 +1135,10 @@ def _concat_same_dtype(self, to_concat, name):
we allow a 0-len index here as well
"""
if not len({i.closed for i in to_concat if len(i)}) == 1:
- msg = (
+ raise ValueError(
"can only append two IntervalIndex objects "
"that are closed on the same side"
)
- raise ValueError(msg)
return super()._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
@@ -1175,10 +1192,13 @@ def _format_data(self, name=None):
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
- summary = f"[{', '.join(head)} ... {', '.join(tail)}]"
+ head_joined = ", ".join(head)
+ tail_joined = ", ".join(tail)
+ summary = f"[{head_joined} ... {tail_joined}]"
else:
tail = [formatter(x) for x in self]
- summary = f"[{', '.join(tail)}]"
+ joined = ", ".join(tail)
+ summary = f"[{joined}]"
return summary + "," + self._format_space()
@@ -1189,7 +1209,7 @@ def _format_attrs(self):
attrs.append(("dtype", f"'{self.dtype}'"))
return attrs
- def _format_space(self):
+ def _format_space(self) -> str:
space = " " * (len(type(self).__name__) + 1)
return f"\n{space}"
@@ -1200,7 +1220,7 @@ def argsort(self, *args, **kwargs):
def equals(self, other) -> bool:
"""
- Determines if two IntervalIndex objects contain the same elements
+ Determines if two IntervalIndex objects contain the same elements.
"""
if self.is_(other):
return True
@@ -1288,7 +1308,7 @@ def _intersection_unique(self, other: "IntervalIndex") -> "IntervalIndex":
Returns
-------
- taken : IntervalIndex
+ IntervalIndex
"""
lindexer = self.left.get_indexer(other.left)
rindexer = self.right.get_indexer(other.right)
@@ -1310,7 +1330,7 @@ def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
Returns
-------
- taken : IntervalIndex
+ IntervalIndex
"""
mask = np.zeros(len(self), dtype=bool)
@@ -1360,7 +1380,9 @@ def is_all_dates(self) -> bool:
def _is_valid_endpoint(endpoint) -> bool:
- """helper for interval_range to check if start/end are valid types"""
+ """
+ Helper for interval_range to check if start/end are valid types.
+ """
return any(
[
is_number(endpoint),
@@ -1372,7 +1394,9 @@ def _is_valid_endpoint(endpoint) -> bool:
def _is_type_compatible(a, b) -> bool:
- """helper for interval_range to check type compat of start/end/freq"""
+ """
+ Helper for interval_range to check type compat of start/end/freq.
+ """
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return (
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 9e434d0f5f704..05a4da28eb0a1 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
import datetime
from sys import getsizeof
+from typing import List, Optional
import warnings
import numpy as np
@@ -85,7 +86,7 @@ def _codes_to_ints(self, codes):
Returns
-------
- int_keys : scalar or 1-dimensional array, of dtype uint64
+ scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
@@ -125,7 +126,7 @@ def _codes_to_ints(self, codes):
Returns
-------
- int_keys : int, or 1-dimensional array of dtype object
+ int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
@@ -248,8 +249,8 @@ def __new__(
dtype=None,
copy=False,
name=None,
- verify_integrity=True,
- _set_identity=True,
+ verify_integrity: bool = True,
+ _set_identity: bool = True,
):
# compat with Index
@@ -287,7 +288,7 @@ def __new__(
return result
- def _validate_codes(self, level: list, code: list):
+ def _validate_codes(self, level: List, code: List):
"""
Reassign code values as -1 if their corresponding levels are NaN.
@@ -300,7 +301,7 @@ def _validate_codes(self, level: list, code: list):
Returns
-------
- code : new code where code value = -1 if it corresponds
+ new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
@@ -308,9 +309,10 @@ def _validate_codes(self, level: list, code: list):
code = np.where(null_mask[code], -1, code)
return code
- def _verify_integrity(self, codes=None, levels=None):
+ def _verify_integrity(
+ self, codes: Optional[List] = None, levels: Optional[List] = None
+ ):
"""
-
Parameters
----------
codes : optional list
@@ -326,7 +328,7 @@ def _verify_integrity(self, codes=None, levels=None):
Returns
-------
- codes : new codes where code value = -1 if it corresponds to a
+ new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that cached
@@ -336,8 +338,8 @@ def _verify_integrity(self, codes=None, levels=None):
if len(levels) != len(codes):
raise ValueError(
- "Length of levels and codes must match. NOTE:"
- " this index is in an inconsistent state."
+ "Length of levels and codes must match. NOTE: "
+ "this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
@@ -389,7 +391,7 @@ def from_arrays(cls, arrays, sortorder=None, names=_no_default_names):
Returns
-------
- index : MultiIndex
+ MultiIndex
See Also
--------
@@ -454,7 +456,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
Returns
-------
- index : MultiIndex
+ MultiIndex
See Also
--------
@@ -481,8 +483,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
if len(tuples) == 0:
if names is None:
- msg = "Cannot infer number of levels from empty list"
- raise TypeError(msg)
+ raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
@@ -518,7 +519,7 @@ def from_product(cls, iterables, sortorder=None, names=_no_default_names):
Returns
-------
- index : MultiIndex
+ MultiIndex
See Also
--------
@@ -653,15 +654,15 @@ def array(self):
------
ValueError
"""
- msg = (
+ raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
- raise ValueError(msg)
@property
def _is_homogeneous_type(self) -> bool:
- """Whether the levels of a MultiIndex all have the same dtype.
+ """
+ Whether the levels of a MultiIndex all have the same dtype.
This looks at the dtypes of the levels.
@@ -732,7 +733,7 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
- verify_integrity : bool (default True)
+ verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 048bff46759bc..b84c69b8caf51 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -85,8 +85,9 @@ def _validate_dtype(cls, dtype: Dtype) -> None:
validation_func, expected = validation_metadata[cls._typ]
if not validation_func(dtype):
- msg = f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
- raise ValueError(msg)
+ raise ValueError(
+ f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
+ )
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
def _maybe_cast_slice_bound(self, label, side, kind):
@@ -106,7 +107,6 @@ def _convert_for_op(self, value):
"""
Convert value to be insertable to ndarray.
"""
-
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
@@ -121,17 +121,13 @@ def _convert_tolerance(self, tolerance, target):
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
- (
- f"tolerance argument for {type(self).__name__} must contain "
- "numeric elements if it is list type"
- )
+ f"tolerance argument for {type(self).__name__} must contain "
+ "numeric elements if it is list type"
)
else:
raise ValueError(
- (
- f"tolerance argument for {type(self).__name__} must be numeric "
- f"if it is a scalar: {repr(tolerance)}"
- )
+ f"tolerance argument for {type(self).__name__} must be numeric "
+ f"if it is a scalar: {repr(tolerance)}"
)
return tolerance
@@ -244,7 +240,9 @@ class Int64Index(IntegerIndex):
@property
def inferred_type(self) -> str:
- """Always 'integer' for ``Int64Index``"""
+ """
+ Always 'integer' for ``Int64Index``
+ """
return "integer"
@property
@@ -299,7 +297,9 @@ class UInt64Index(IntegerIndex):
@property
def inferred_type(self) -> str:
- """Always 'integer' for ``UInt64Index``"""
+ """
+ Always 'integer' for ``UInt64Index``
+ """
return "integer"
@property
@@ -374,7 +374,9 @@ class Float64Index(NumericIndex):
@property
def inferred_type(self) -> str:
- """Always 'floating' for ``Float64Index``"""
+ """
+ Always 'floating' for ``Float64Index``
+ """
return "floating"
@Appender(_index_shared_docs["astype"])
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index 1d4fa929b2138..bed29e1fd4792 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -6,7 +6,8 @@
def get_console_size():
- """Return console size as tuple = (width, height).
+ """
+ Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
@@ -50,9 +51,13 @@ def get_console_size():
def in_interactive_session():
- """ check if we're running in an interactive shell
+ """
+ Check if we're running in an interactive shell.
- returns True if running under python/ipython interactive shell
+ Returns
+ -------
+ bool
+ True if running under python/ipython interactive shell.
"""
from pandas import get_option
@@ -71,7 +76,11 @@ def check_main():
def in_ipython_frontend():
"""
- check if we're inside an an IPython zmq frontend
+ Check if we're inside an an IPython zmq frontend.
+
+ Returns
+ -------
+ bool
"""
try:
ip = get_ipython() # noqa
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30390 | 2019-12-21T14:55:53Z | 2019-12-21T20:11:01Z | 2019-12-21T20:11:01Z | 2019-12-21T22:25:38Z |
DOC: fix external links + favicon | diff --git a/doc/source/_static/favicon.ico b/doc/source/_static/favicon.ico
deleted file mode 100644
index d15c4803b62e6..0000000000000
Binary files a/doc/source/_static/favicon.ico and /dev/null differ
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 096f1a63eddf8..481c03ab8f388 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -204,7 +204,11 @@
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-# html_theme_options = {}
+html_theme_options = {
+ "external_links": [],
+ "github_url": "https://github.com/pandas-dev/pandas",
+ "twitter_url": "https://twitter.com/pandas_dev",
+}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ["themes"]
@@ -228,7 +232,7 @@
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-html_favicon = os.path.join(html_static_path[0], "favicon.ico")
+html_favicon = "../../web/pandas/static/img/favicon.ico"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
| Fixes the wrong external links (see https://github.com/pandas-dev/pydata-bootstrap-sphinx-theme/issues/66), and also fixes the favicon | https://api.github.com/repos/pandas-dev/pandas/pulls/30389 | 2019-12-21T08:57:40Z | 2019-12-22T09:56:28Z | 2019-12-22T09:56:28Z | 2019-12-22T09:56:31Z |
DEPR: Deprecate pandas.np module | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
old mode 100644
new mode 100755
index 716784f798a54..d733aefa43385
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -205,6 +205,7 @@ Other enhancements
(:meth:`~DataFrame.to_parquet` / :func:`read_parquet`) using the `'pyarrow'` engine
now preserve those data types with pyarrow >= 1.0.0 (:issue:`20612`).
- The ``partition_cols`` argument in :meth:`DataFrame.to_parquet` now accepts a string (:issue:`27117`)
+- The ``pandas.np`` submodule is now deprecated. Import numpy directly instead (:issue:`30296`)
- :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. (:issue: `30270`)
- DataFrame constructor preserve `ExtensionArray` dtype with `ExtensionArray` (:issue:`11363`)
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 30b7e5bafe1df..0d4f7bedb77e6 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -105,7 +105,6 @@
to_datetime,
to_timedelta,
# misc
- np,
Grouper,
factorize,
unique,
@@ -189,7 +188,6 @@
__git_version__ = v.get("full-revisionid")
del get_versions, v
-
# GH 27101
# TODO: remove Panel compat in 1.0
if pandas.compat.PY37:
@@ -211,6 +209,20 @@ class Panel:
pass
return Panel
+
+ elif name == "np":
+
+ warnings.warn(
+ "The pandas.np module is deprecated "
+ "and will be removed from pandas in a future version. "
+ "Import numpy directly instead",
+ FutureWarning,
+ stacklevel=2,
+ )
+ import numpy as np
+
+ return np
+
elif name in {"SparseSeries", "SparseDataFrame"}:
warnings.warn(
"The {} class is removed from pandas. Accessing it from "
@@ -236,6 +248,28 @@ class SparseDataFrame:
class SparseSeries:
pass
+ class __numpy:
+ def __init__(self):
+ import numpy as np
+ import warnings
+
+ self.np = np
+ self.warnings = warnings
+
+ def __getattr__(self, item):
+ self.warnings.warn(
+ "The pandas.np module is deprecated "
+ "and will be removed from pandas in a future version. "
+ "Import numpy directly instead",
+ FutureWarning,
+ stacklevel=2,
+ )
+ try:
+ return getattr(self.np, item)
+ except AttributeError:
+ raise AttributeError(f"module numpy has no attribute {item}")
+
+ np = __numpy()
# module level doc-string
__doc__ = """
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 5261801600111..b0b65f9d0be34 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -1,7 +1,5 @@
# flake8: noqa
-import numpy as np
-
from pandas._libs import NaT, Period, Timedelta, Timestamp
from pandas._libs.missing import NA
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 900ba878e4c0a..d586dbbb72831 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -20,7 +20,6 @@ def check(self, namespace, expected, ignored=None):
class TestPDApi(Base):
-
# these are optionally imported based on testing
# & need to be ignored
ignored = ["tests", "locale", "conftest"]
@@ -93,6 +92,7 @@ class TestPDApi(Base):
]
if not compat.PY37:
classes.extend(["Panel", "SparseSeries", "SparseDataFrame"])
+ deprecated_modules.append("np")
# these are already deprecated; awaiting removal
deprecated_classes: List[str] = []
@@ -101,7 +101,7 @@ class TestPDApi(Base):
deprecated_classes_in_future: List[str] = []
# external modules exposed in pandas namespace
- modules = ["np", "datetime"]
+ modules = ["datetime"]
# top-level functions
funcs = [
@@ -220,22 +220,43 @@ def test_api(self):
self.ignored,
)
+ def test_depr(self):
+ deprecated = (
+ self.deprecated_modules
+ + self.deprecated_classes
+ + self.deprecated_classes_in_future
+ + self.deprecated_funcs
+ + self.deprecated_funcs_in_future
+ )
+ for depr in deprecated:
+ with tm.assert_produces_warning(FutureWarning):
+ if compat.PY37:
+ getattr(pd, depr)
+ else:
+ deprecated = getattr(pd, depr)
+ deprecated.__getattr__(dir(deprecated)[-1])
-class TestApi(Base):
+def test_np():
+ import numpy as np
+ import warnings
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", FutureWarning)
+ assert (pd.np.arange(0, 10) == np.arange(0, 10)).all()
+
+
+class TestApi(Base):
allowed = ["types", "extensions", "indexers"]
def test_api(self):
-
self.check(api, self.allowed)
class TestTesting(Base):
-
funcs = ["assert_frame_equal", "assert_series_equal", "assert_index_equal"]
def test_testing(self):
-
from pandas import testing
self.check(testing, self.funcs)
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index c8f0d3b3fe553..e17f0af24760c 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -679,7 +679,7 @@ def test_indexing(self):
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range("2002-01", "2003-12", freq="M")
- df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
+ df = pd.DataFrame(np.random.randn(24, 10), index=idx)
tm.assert_frame_equal(df, df.loc[idx])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df, df.loc[list(idx)])
| - [x] closes #30296
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30386 | 2019-12-20T23:25:25Z | 2019-12-27T16:52:54Z | 2019-12-27T16:52:54Z | 2019-12-30T23:28:09Z |
REF: targeted test files for nlargest, searchsorted, value_counts | diff --git a/pandas/tests/frame/methods/test_nlargest.py b/pandas/tests/frame/methods/test_nlargest.py
new file mode 100644
index 0000000000000..72299ad6b2bf6
--- /dev/null
+++ b/pandas/tests/frame/methods/test_nlargest.py
@@ -0,0 +1,211 @@
+"""
+Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo"
+but are implicitly also testing nsmallest_foo.
+"""
+from string import ascii_lowercase
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas.util.testing as tm
+
+
+@pytest.fixture
+def df_duplicates():
+ return pd.DataFrame(
+ {"a": [1, 2, 3, 4, 4], "b": [1, 1, 1, 1, 1], "c": [0, 1, 2, 5, 4]},
+ index=[0, 0, 1, 1, 1],
+ )
+
+
+@pytest.fixture
+def df_strings():
+ return pd.DataFrame(
+ {
+ "a": np.random.permutation(10),
+ "b": list(ascii_lowercase[:10]),
+ "c": np.random.permutation(10).astype("float64"),
+ }
+ )
+
+
+@pytest.fixture
+def df_main_dtypes():
+ return pd.DataFrame(
+ {
+ "group": [1, 1, 2],
+ "int": [1, 2, 3],
+ "float": [4.0, 5.0, 6.0],
+ "string": list("abc"),
+ "category_string": pd.Series(list("abc")).astype("category"),
+ "category_int": [7, 8, 9],
+ "datetime": pd.date_range("20130101", periods=3),
+ "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+ "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
+ },
+ columns=[
+ "group",
+ "int",
+ "float",
+ "string",
+ "category_string",
+ "category_int",
+ "datetime",
+ "datetimetz",
+ "timedelta",
+ ],
+ )
+
+
+class TestNLargestNSmallest:
+
+ # ----------------------------------------------------------------------
+ # Top / bottom
+ @pytest.mark.parametrize(
+ "order",
+ [
+ ["a"],
+ ["c"],
+ ["a", "b"],
+ ["a", "c"],
+ ["b", "a"],
+ ["b", "c"],
+ ["a", "b", "c"],
+ ["c", "a", "b"],
+ ["c", "b", "a"],
+ ["b", "c", "a"],
+ ["b", "a", "c"],
+ # dups!
+ ["b", "c", "c"],
+ ],
+ )
+ @pytest.mark.parametrize("n", range(1, 11))
+ def test_nlargest_n(self, df_strings, nselect_method, n, order):
+ # GH#10393
+ df = df_strings
+ if "b" in order:
+
+ error_msg = (
+ f"Column 'b' has dtype object, "
+ f"cannot use method '{nselect_method}' with this dtype"
+ )
+ with pytest.raises(TypeError, match=error_msg):
+ getattr(df, nselect_method)(n, order)
+ else:
+ ascending = nselect_method == "nsmallest"
+ result = getattr(df, nselect_method)(n, order)
+ expected = df.sort_values(order, ascending=ascending).head(n)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "columns", [["group", "category_string"], ["group", "string"]]
+ )
+ def test_nlargest_error(self, df_main_dtypes, nselect_method, columns):
+ df = df_main_dtypes
+ col = columns[1]
+ error_msg = (
+ f"Column '{col}' has dtype {df[col].dtype}, "
+ f"cannot use method '{nselect_method}' with this dtype"
+ )
+ # escape some characters that may be in the repr
+ error_msg = (
+ error_msg.replace("(", "\\(")
+ .replace(")", "\\)")
+ .replace("[", "\\[")
+ .replace("]", "\\]")
+ )
+ with pytest.raises(TypeError, match=error_msg):
+ getattr(df, nselect_method)(2, columns)
+
+ def test_nlargest_all_dtypes(self, df_main_dtypes):
+ df = df_main_dtypes
+ df.nsmallest(2, list(set(df) - {"category_string", "string"}))
+ df.nlargest(2, list(set(df) - {"category_string", "string"}))
+
+ def test_nlargest_duplicates_on_starter_columns(self):
+ # regression test for GH#22752
+
+ df = pd.DataFrame({"a": [2, 2, 2, 1, 1, 1], "b": [1, 2, 3, 3, 2, 1]})
+
+ result = df.nlargest(4, columns=["a", "b"])
+ expected = pd.DataFrame(
+ {"a": [2, 2, 2, 1], "b": [3, 2, 1, 3]}, index=[2, 1, 0, 3]
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nsmallest(4, columns=["a", "b"])
+ expected = pd.DataFrame(
+ {"a": [1, 1, 1, 2], "b": [1, 2, 3, 1]}, index=[5, 4, 3, 0]
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_nlargest_n_identical_values(self):
+ # GH#15297
+ df = pd.DataFrame({"a": [1] * 5, "b": [1, 2, 3, 4, 5]})
+
+ result = df.nlargest(3, "a")
+ expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]}, index=[0, 1, 2])
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nsmallest(3, "a")
+ expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]})
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "order",
+ [["a", "b", "c"], ["c", "b", "a"], ["a"], ["b"], ["a", "b"], ["c", "b"]],
+ )
+ @pytest.mark.parametrize("n", range(1, 6))
+ def test_nlargest_n_duplicate_index(self, df_duplicates, n, order):
+ # GH#13412
+
+ df = df_duplicates
+ result = df.nsmallest(n, order)
+ expected = df.sort_values(order).head(n)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nlargest(n, order)
+ expected = df.sort_values(order, ascending=False).head(n)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nlargest_duplicate_keep_all_ties(self):
+ # GH#16818
+ df = pd.DataFrame(
+ {"a": [5, 4, 4, 2, 3, 3, 3, 3], "b": [10, 9, 8, 7, 5, 50, 10, 20]}
+ )
+ result = df.nlargest(4, "a", keep="all")
+ expected = pd.DataFrame(
+ {
+ "a": {0: 5, 1: 4, 2: 4, 4: 3, 5: 3, 6: 3, 7: 3},
+ "b": {0: 10, 1: 9, 2: 8, 4: 5, 5: 50, 6: 10, 7: 20},
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.nsmallest(2, "a", keep="all")
+ expected = pd.DataFrame(
+ {
+ "a": {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
+ "b": {3: 7, 4: 5, 5: 50, 6: 10, 7: 20},
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_nlargest_multiindex_column_lookup(self):
+ # Check whether tuples are correctly treated as multi-level lookups.
+ # GH#23033
+ df = pd.DataFrame(
+ columns=pd.MultiIndex.from_product([["x"], ["a", "b"]]),
+ data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]],
+ )
+
+ # nsmallest
+ result = df.nsmallest(3, ("x", "a"))
+ expected = df.iloc[[2, 0, 3]]
+ tm.assert_frame_equal(result, expected)
+
+ # nlargest
+ result = df.nlargest(3, ("x", "b"))
+ expected = df.iloc[[3, 2, 1]]
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 0653c9dc5f91b..ee9329da4e5e1 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1,7 +1,6 @@
from datetime import timedelta
from decimal import Decimal
import operator
-from string import ascii_lowercase
import warnings
import numpy as np
@@ -2442,194 +2441,16 @@ def test_matmul(self):
with pytest.raises(ValueError, match="aligned"):
operator.matmul(df, df2)
+ # ---------------------------------------------------------------------
+ # Unsorted
-@pytest.fixture
-def df_duplicates():
- return pd.DataFrame(
- {"a": [1, 2, 3, 4, 4], "b": [1, 1, 1, 1, 1], "c": [0, 1, 2, 5, 4]},
- index=[0, 0, 1, 1, 1],
- )
-
-
-@pytest.fixture
-def df_strings():
- return pd.DataFrame(
- {
- "a": np.random.permutation(10),
- "b": list(ascii_lowercase[:10]),
- "c": np.random.permutation(10).astype("float64"),
- }
- )
-
-
-@pytest.fixture
-def df_main_dtypes():
- return pd.DataFrame(
- {
- "group": [1, 1, 2],
- "int": [1, 2, 3],
- "float": [4.0, 5.0, 6.0],
- "string": list("abc"),
- "category_string": pd.Series(list("abc")).astype("category"),
- "category_int": [7, 8, 9],
- "datetime": pd.date_range("20130101", periods=3),
- "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
- "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
- },
- columns=[
- "group",
- "int",
- "float",
- "string",
- "category_string",
- "category_int",
- "datetime",
- "datetimetz",
- "timedelta",
- ],
- )
-
-
-class TestNLargestNSmallest:
-
- # ----------------------------------------------------------------------
- # Top / bottom
- @pytest.mark.parametrize(
- "order",
- [
- ["a"],
- ["c"],
- ["a", "b"],
- ["a", "c"],
- ["b", "a"],
- ["b", "c"],
- ["a", "b", "c"],
- ["c", "a", "b"],
- ["c", "b", "a"],
- ["b", "c", "a"],
- ["b", "a", "c"],
- # dups!
- ["b", "c", "c"],
- ],
- )
- @pytest.mark.parametrize("n", range(1, 11))
- def test_n(self, df_strings, nselect_method, n, order):
- # GH 10393
- df = df_strings
- if "b" in order:
-
- error_msg = (
- f"Column 'b' has dtype object, "
- f"cannot use method '{nselect_method}' with this dtype"
- )
- with pytest.raises(TypeError, match=error_msg):
- getattr(df, nselect_method)(n, order)
- else:
- ascending = nselect_method == "nsmallest"
- result = getattr(df, nselect_method)(n, order)
- expected = df.sort_values(order, ascending=ascending).head(n)
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize(
- "columns", [["group", "category_string"], ["group", "string"]]
- )
- def test_n_error(self, df_main_dtypes, nselect_method, columns):
- df = df_main_dtypes
- col = columns[1]
- error_msg = (
- f"Column '{col}' has dtype {df[col].dtype}, "
- f"cannot use method '{nselect_method}' with this dtype"
- )
- # escape some characters that may be in the repr
- error_msg = (
- error_msg.replace("(", "\\(")
- .replace(")", "\\)")
- .replace("[", "\\[")
- .replace("]", "\\]")
- )
- with pytest.raises(TypeError, match=error_msg):
- getattr(df, nselect_method)(2, columns)
-
- def test_n_all_dtypes(self, df_main_dtypes):
- df = df_main_dtypes
- df.nsmallest(2, list(set(df) - {"category_string", "string"}))
- df.nlargest(2, list(set(df) - {"category_string", "string"}))
-
- @pytest.mark.parametrize(
- "method,expected",
- [
- (
- "nlargest",
- pd.DataFrame(
- {"a": [2, 2, 2, 1], "b": [3, 2, 1, 3]}, index=[2, 1, 0, 3]
- ),
- ),
- (
- "nsmallest",
- pd.DataFrame(
- {"a": [1, 1, 1, 2], "b": [1, 2, 3, 1]}, index=[5, 4, 3, 0]
- ),
- ),
- ],
- )
- def test_duplicates_on_starter_columns(self, method, expected):
- # regression test for #22752
-
- df = pd.DataFrame({"a": [2, 2, 2, 1, 1, 1], "b": [1, 2, 3, 3, 2, 1]})
-
- result = getattr(df, method)(4, columns=["a", "b"])
- tm.assert_frame_equal(result, expected)
-
- def test_n_identical_values(self):
- # GH 15297
- df = pd.DataFrame({"a": [1] * 5, "b": [1, 2, 3, 4, 5]})
-
- result = df.nlargest(3, "a")
- expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]}, index=[0, 1, 2])
- tm.assert_frame_equal(result, expected)
-
- result = df.nsmallest(3, "a")
- expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]})
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize(
- "order",
- [["a", "b", "c"], ["c", "b", "a"], ["a"], ["b"], ["a", "b"], ["c", "b"]],
- )
- @pytest.mark.parametrize("n", range(1, 6))
- def test_n_duplicate_index(self, df_duplicates, n, order):
- # GH 13412
-
- df = df_duplicates
- result = df.nsmallest(n, order)
- expected = df.sort_values(order).head(n)
- tm.assert_frame_equal(result, expected)
-
- result = df.nlargest(n, order)
- expected = df.sort_values(order, ascending=False).head(n)
- tm.assert_frame_equal(result, expected)
-
- def test_duplicate_keep_all_ties(self):
- # GH 16818
- df = pd.DataFrame(
- {"a": [5, 4, 4, 2, 3, 3, 3, 3], "b": [10, 9, 8, 7, 5, 50, 10, 20]}
- )
- result = df.nlargest(4, "a", keep="all")
- expected = pd.DataFrame(
- {
- "a": {0: 5, 1: 4, 2: 4, 4: 3, 5: 3, 6: 3, 7: 3},
- "b": {0: 10, 1: 9, 2: 8, 4: 5, 5: 50, 6: 10, 7: 20},
- }
- )
- tm.assert_frame_equal(result, expected)
-
- result = df.nsmallest(2, "a", keep="all")
- expected = pd.DataFrame(
- {
- "a": {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
- "b": {3: 7, 4: 5, 5: 50, 6: 10, 7: 20},
- }
- )
+ def test_series_nat_conversion(self):
+ # GH 18521
+ # Check rank does not mutate DataFrame
+ df = DataFrame(np.random.randn(10, 3), dtype="float64")
+ expected = df.copy()
+ df.rank()
+ result = df
tm.assert_frame_equal(result, expected)
def test_series_broadcasting(self):
@@ -2644,30 +2465,3 @@ def test_series_broadcasting(self):
df_nan.clip(lower=s, axis=0)
for op in ["lt", "le", "gt", "ge", "eq", "ne"]:
getattr(df, op)(s_nan, axis=0)
-
- def test_series_nat_conversion(self):
- # GH 18521
- # Check rank does not mutate DataFrame
- df = DataFrame(np.random.randn(10, 3), dtype="float64")
- expected = df.copy()
- df.rank()
- result = df
- tm.assert_frame_equal(result, expected)
-
- def test_multiindex_column_lookup(self):
- # Check whether tuples are correctly treated as multi-level lookups.
- # GH 23033
- df = pd.DataFrame(
- columns=pd.MultiIndex.from_product([["x"], ["a", "b"]]),
- data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]],
- )
-
- # nsmallest
- result = df.nsmallest(3, ("x", "a"))
- expected = df.iloc[[2, 0, 3]]
- tm.assert_frame_equal(result, expected)
-
- # nlargest
- result = df.nlargest(3, ("x", "b"))
- expected = df.iloc[[3, 2, 1]]
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_nlargest.py b/pandas/tests/series/methods/test_nlargest.py
new file mode 100644
index 0000000000000..423b4ad78a78a
--- /dev/null
+++ b/pandas/tests/series/methods/test_nlargest.py
@@ -0,0 +1,213 @@
+"""
+Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo"
+but are implicitly also testing nsmallest_foo.
+"""
+from itertools import product
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import Series
+import pandas.util.testing as tm
+
+main_dtypes = [
+ "datetime",
+ "datetimetz",
+ "timedelta",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+]
+
+
+@pytest.fixture
+def s_main_dtypes():
+ """
+ A DataFrame with many dtypes
+
+ * datetime
+ * datetimetz
+ * timedelta
+ * [u]int{8,16,32,64}
+ * float{32,64}
+
+ The columns are the name of the dtype.
+ """
+ df = pd.DataFrame(
+ {
+ "datetime": pd.to_datetime(["2003", "2002", "2001", "2002", "2005"]),
+ "datetimetz": pd.to_datetime(
+ ["2003", "2002", "2001", "2002", "2005"]
+ ).tz_localize("US/Eastern"),
+ "timedelta": pd.to_timedelta(["3d", "2d", "1d", "2d", "5d"]),
+ }
+ )
+
+ for dtype in [
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ ]:
+ df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
+
+ return df
+
+
+@pytest.fixture(params=main_dtypes)
+def s_main_dtypes_split(request, s_main_dtypes):
+ """Each series in s_main_dtypes."""
+ return s_main_dtypes[request.param]
+
+
+def assert_check_nselect_boundary(vals, dtype, method):
+ # helper function for 'test_boundary_{dtype}' tests
+ ser = Series(vals, dtype=dtype)
+ result = getattr(ser, method)(3)
+ expected_idxr = [0, 1, 2] if method == "nsmallest" else [3, 2, 1]
+ expected = ser.loc[expected_idxr]
+ tm.assert_series_equal(result, expected)
+
+
+class TestSeriesNLargestNSmallest:
+ @pytest.mark.parametrize(
+ "r",
+ [
+ Series([3.0, 2, 1, 2, "5"], dtype="object"),
+ Series([3.0, 2, 1, 2, 5], dtype="object"),
+ # not supported on some archs
+ # Series([3., 2, 1, 2, 5], dtype='complex256'),
+ Series([3.0, 2, 1, 2, 5], dtype="complex128"),
+ Series(list("abcde")),
+ Series(list("abcde"), dtype="category"),
+ ],
+ )
+ def test_nlargest_error(self, r):
+ dt = r.dtype
+ msg = "Cannot use method 'n(larg|small)est' with dtype {dt}".format(dt=dt)
+ args = 2, len(r), 0, -1
+ methods = r.nlargest, r.nsmallest
+ for method, arg in product(methods, args):
+ with pytest.raises(TypeError, match=msg):
+ method(arg)
+
+ def test_nsmallest_nlargest(self, s_main_dtypes_split):
+ # float, int, datetime64 (use i8), timedelts64 (same),
+ # object that are numbers, object that are strings
+ ser = s_main_dtypes_split
+
+ tm.assert_series_equal(ser.nsmallest(2), ser.iloc[[2, 1]])
+ tm.assert_series_equal(ser.nsmallest(2, keep="last"), ser.iloc[[2, 3]])
+
+ empty = ser.iloc[0:0]
+ tm.assert_series_equal(ser.nsmallest(0), empty)
+ tm.assert_series_equal(ser.nsmallest(-1), empty)
+ tm.assert_series_equal(ser.nlargest(0), empty)
+ tm.assert_series_equal(ser.nlargest(-1), empty)
+
+ tm.assert_series_equal(ser.nsmallest(len(ser)), ser.sort_values())
+ tm.assert_series_equal(ser.nsmallest(len(ser) + 1), ser.sort_values())
+ tm.assert_series_equal(ser.nlargest(len(ser)), ser.iloc[[4, 0, 1, 3, 2]])
+ tm.assert_series_equal(ser.nlargest(len(ser) + 1), ser.iloc[[4, 0, 1, 3, 2]])
+
+ def test_nlargest_misc(self):
+
+ ser = Series([3.0, np.nan, 1, 2, 5])
+ tm.assert_series_equal(ser.nlargest(), ser.iloc[[4, 0, 3, 2]])
+ tm.assert_series_equal(ser.nsmallest(), ser.iloc[[2, 3, 0, 4]])
+
+ msg = 'keep must be either "first", "last"'
+ with pytest.raises(ValueError, match=msg):
+ ser.nsmallest(keep="invalid")
+ with pytest.raises(ValueError, match=msg):
+ ser.nlargest(keep="invalid")
+
+ # GH#15297
+ ser = Series([1] * 5, index=[1, 2, 3, 4, 5])
+ expected_first = Series([1] * 3, index=[1, 2, 3])
+ expected_last = Series([1] * 3, index=[5, 4, 3])
+
+ result = ser.nsmallest(3)
+ tm.assert_series_equal(result, expected_first)
+
+ result = ser.nsmallest(3, keep="last")
+ tm.assert_series_equal(result, expected_last)
+
+ result = ser.nlargest(3)
+ tm.assert_series_equal(result, expected_first)
+
+ result = ser.nlargest(3, keep="last")
+ tm.assert_series_equal(result, expected_last)
+
+ @pytest.mark.parametrize("n", range(1, 5))
+ def test_nlargest_n(self, n):
+
+ # GH 13412
+ ser = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
+ result = ser.nlargest(n)
+ expected = ser.sort_values(ascending=False).head(n)
+ tm.assert_series_equal(result, expected)
+
+ result = ser.nsmallest(n)
+ expected = ser.sort_values().head(n)
+ tm.assert_series_equal(result, expected)
+
+ def test_nlargest_boundary_integer(self, nselect_method, any_int_dtype):
+ # GH#21426
+ dtype_info = np.iinfo(any_int_dtype)
+ min_val, max_val = dtype_info.min, dtype_info.max
+ vals = [min_val, min_val + 1, max_val - 1, max_val]
+ assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
+
+ def test_nlargest_boundary_float(self, nselect_method, float_dtype):
+ # GH#21426
+ dtype_info = np.finfo(float_dtype)
+ min_val, max_val = dtype_info.min, dtype_info.max
+ min_2nd, max_2nd = np.nextafter([min_val, max_val], 0, dtype=float_dtype)
+ vals = [min_val, min_2nd, max_2nd, max_val]
+ assert_check_nselect_boundary(vals, float_dtype, nselect_method)
+
+ @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
+ def test_nlargest_boundary_datetimelike(self, nselect_method, dtype):
+ # GH#21426
+ # use int64 bounds and +1 to min_val since true minimum is NaT
+ # (include min_val/NaT at end to maintain same expected_idxr)
+ dtype_info = np.iinfo("int64")
+ min_val, max_val = dtype_info.min, dtype_info.max
+ vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
+ assert_check_nselect_boundary(vals, dtype, nselect_method)
+
+ def test_nlargest_duplicate_keep_all_ties(self):
+ # see GH#16818
+ ser = Series([10, 9, 8, 7, 7, 7, 7, 6])
+ result = ser.nlargest(4, keep="all")
+ expected = Series([10, 9, 8, 7, 7, 7, 7])
+ tm.assert_series_equal(result, expected)
+
+ result = ser.nsmallest(2, keep="all")
+ expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "data,expected", [([True, False], [True]), ([True, False, True, True], [True])]
+ )
+ def test_nlargest_boolean(self, data, expected):
+ # GH#26154 : ensure True > False
+ ser = Series(data)
+ result = ser.nlargest(1)
+ expected = Series(expected)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_searchsorted.py b/pandas/tests/series/methods/test_searchsorted.py
new file mode 100644
index 0000000000000..0d6e9635579f0
--- /dev/null
+++ b/pandas/tests/series/methods/test_searchsorted.py
@@ -0,0 +1,55 @@
+import numpy as np
+
+from pandas import Series, Timestamp, date_range
+from pandas.api.types import is_scalar
+import pandas.util.testing as tm
+
+
+class TestSeriesSearchSorted:
+ def test_searchsorted(self):
+ ser = Series([1, 2, 3])
+
+ result = ser.searchsorted(1, side="left")
+ assert is_scalar(result)
+ assert result == 0
+
+ result = ser.searchsorted(1, side="right")
+ assert is_scalar(result)
+ assert result == 1
+
+ def test_searchsorted_numeric_dtypes_scalar(self):
+ ser = Series([1, 2, 90, 1000, 3e9])
+ res = ser.searchsorted(30)
+ assert is_scalar(res)
+ assert res == 2
+
+ res = ser.searchsorted([30])
+ exp = np.array([2], dtype=np.intp)
+ tm.assert_numpy_array_equal(res, exp)
+
+ def test_searchsorted_numeric_dtypes_vector(self):
+ ser = Series([1, 2, 90, 1000, 3e9])
+ res = ser.searchsorted([91, 2e6])
+ exp = np.array([3, 4], dtype=np.intp)
+ tm.assert_numpy_array_equal(res, exp)
+
+ def test_searchsorted_datetime64_scalar(self):
+ ser = Series(date_range("20120101", periods=10, freq="2D"))
+ val = Timestamp("20120102")
+ res = ser.searchsorted(val)
+ assert is_scalar(res)
+ assert res == 1
+
+ def test_searchsorted_datetime64_list(self):
+ ser = Series(date_range("20120101", periods=10, freq="2D"))
+ vals = [Timestamp("20120102"), Timestamp("20120104")]
+ res = ser.searchsorted(vals)
+ exp = np.array([1, 2], dtype=np.intp)
+ tm.assert_numpy_array_equal(res, exp)
+
+ def test_searchsorted_sorter(self):
+ # GH8490
+ ser = Series([3, 1, 2])
+ res = ser.searchsorted([0, 3], sorter=np.argsort(ser))
+ exp = np.array([0, 2], dtype=np.intp)
+ tm.assert_numpy_array_equal(res, exp)
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
new file mode 100644
index 0000000000000..15d895f44c7b2
--- /dev/null
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -0,0 +1,179 @@
+import numpy as np
+
+import pandas as pd
+from pandas import Categorical, CategoricalIndex, Series
+import pandas.util.testing as tm
+
+
+class TestSeriesValueCounts:
+ def test_value_counts_datetime(self):
+ # most dtypes are tested in tests/base
+ values = [
+ pd.Timestamp("2011-01-01 09:00"),
+ pd.Timestamp("2011-01-01 10:00"),
+ pd.Timestamp("2011-01-01 11:00"),
+ pd.Timestamp("2011-01-01 09:00"),
+ pd.Timestamp("2011-01-01 09:00"),
+ pd.Timestamp("2011-01-01 11:00"),
+ ]
+
+ exp_idx = pd.DatetimeIndex(
+ ["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"]
+ )
+ exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+
+ ser = pd.Series(values, name="xxx")
+ tm.assert_series_equal(ser.value_counts(), exp)
+ # check DatetimeIndex outputs the same result
+ idx = pd.DatetimeIndex(values, name="xxx")
+ tm.assert_series_equal(idx.value_counts(), exp)
+
+ # normalize
+ exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ tm.assert_series_equal(ser.value_counts(normalize=True), exp)
+ tm.assert_series_equal(idx.value_counts(normalize=True), exp)
+
+ def test_value_counts_datetime_tz(self):
+ values = [
+ pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
+ pd.Timestamp("2011-01-01 10:00", tz="US/Eastern"),
+ pd.Timestamp("2011-01-01 11:00", tz="US/Eastern"),
+ pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
+ pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
+ pd.Timestamp("2011-01-01 11:00", tz="US/Eastern"),
+ ]
+
+ exp_idx = pd.DatetimeIndex(
+ ["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"],
+ tz="US/Eastern",
+ )
+ exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+
+ ser = pd.Series(values, name="xxx")
+ tm.assert_series_equal(ser.value_counts(), exp)
+ idx = pd.DatetimeIndex(values, name="xxx")
+ tm.assert_series_equal(idx.value_counts(), exp)
+
+ exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ tm.assert_series_equal(ser.value_counts(normalize=True), exp)
+ tm.assert_series_equal(idx.value_counts(normalize=True), exp)
+
+ def test_value_counts_period(self):
+ values = [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-02", freq="M"),
+ pd.Period("2011-03", freq="M"),
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-03", freq="M"),
+ ]
+
+ exp_idx = pd.PeriodIndex(["2011-01", "2011-03", "2011-02"], freq="M")
+ exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+
+ ser = pd.Series(values, name="xxx")
+ tm.assert_series_equal(ser.value_counts(), exp)
+ # check DatetimeIndex outputs the same result
+ idx = pd.PeriodIndex(values, name="xxx")
+ tm.assert_series_equal(idx.value_counts(), exp)
+
+ # normalize
+ exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ tm.assert_series_equal(ser.value_counts(normalize=True), exp)
+ tm.assert_series_equal(idx.value_counts(normalize=True), exp)
+
+ def test_value_counts_categorical_ordered(self):
+ # most dtypes are tested in tests/base
+ values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
+
+ exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=True)
+ exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+
+ ser = pd.Series(values, name="xxx")
+ tm.assert_series_equal(ser.value_counts(), exp)
+ # check CategoricalIndex outputs the same result
+ idx = pd.CategoricalIndex(values, name="xxx")
+ tm.assert_series_equal(idx.value_counts(), exp)
+
+ # normalize
+ exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ tm.assert_series_equal(ser.value_counts(normalize=True), exp)
+ tm.assert_series_equal(idx.value_counts(normalize=True), exp)
+
+ def test_value_counts_categorical_not_ordered(self):
+ values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
+
+ exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=False)
+ exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+
+ ser = pd.Series(values, name="xxx")
+ tm.assert_series_equal(ser.value_counts(), exp)
+ # check CategoricalIndex outputs the same result
+ idx = pd.CategoricalIndex(values, name="xxx")
+ tm.assert_series_equal(idx.value_counts(), exp)
+
+ # normalize
+ exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ tm.assert_series_equal(ser.value_counts(normalize=True), exp)
+ tm.assert_series_equal(idx.value_counts(normalize=True), exp)
+
+ def test_value_counts_categorical(self):
+ # GH#12835
+ cats = Categorical(list("abcccb"), categories=list("cabd"))
+ ser = Series(cats, name="xxx")
+ res = ser.value_counts(sort=False)
+
+ exp_index = CategoricalIndex(list("cabd"), categories=cats.categories)
+ exp = Series([3, 1, 2, 0], name="xxx", index=exp_index)
+ tm.assert_series_equal(res, exp)
+
+ res = ser.value_counts(sort=True)
+
+ exp_index = CategoricalIndex(list("cbad"), categories=cats.categories)
+ exp = Series([3, 2, 1, 0], name="xxx", index=exp_index)
+ tm.assert_series_equal(res, exp)
+
+ # check object dtype handles the Series.name as the same
+ # (tested in tests/base)
+ ser = Series(["a", "b", "c", "c", "c", "b"], name="xxx")
+ res = ser.value_counts()
+ exp = Series([3, 2, 1], name="xxx", index=["c", "b", "a"])
+ tm.assert_series_equal(res, exp)
+
+ def test_value_counts_categorical_with_nan(self):
+ # see GH#9443
+
+ # sanity check
+ ser = Series(["a", "b", "a"], dtype="category")
+ exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
+
+ res = ser.value_counts(dropna=True)
+ tm.assert_series_equal(res, exp)
+
+ res = ser.value_counts(dropna=True)
+ tm.assert_series_equal(res, exp)
+
+ # same Series via two different constructions --> same behaviour
+ series = [
+ Series(["a", "b", None, "a", None, None], dtype="category"),
+ Series(
+ Categorical(["a", "b", None, "a", None, None], categories=["a", "b"])
+ ),
+ ]
+
+ for ser in series:
+ # None is a NaN value, so we exclude its count here
+ exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
+ res = ser.value_counts(dropna=True)
+ tm.assert_series_equal(res, exp)
+
+ # we don't exclude the count of None and sort by counts
+ exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
+ res = ser.value_counts(dropna=False)
+ tm.assert_series_equal(res, exp)
+
+ # When we aren't sorting by counts, and np.nan isn't a
+ # category, it should be last.
+ exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
+ res = ser.value_counts(dropna=False, sort=False)
+ tm.assert_series_equal(res, exp)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 148c376eba752..6b85714d06594 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1,4 +1,3 @@
-from itertools import product
import operator
import numpy as np
@@ -7,17 +6,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import (
- Categorical,
- CategoricalIndex,
- DataFrame,
- MultiIndex,
- Series,
- date_range,
- isna,
- notna,
-)
-from pandas.api.types import is_scalar
+from pandas import Categorical, DataFrame, MultiIndex, Series, date_range, isna, notna
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.util.testing as tm
@@ -637,36 +626,6 @@ def test_clip_with_datetimes(self):
)
tm.assert_series_equal(result, expected)
- def test_cummethods_bool(self):
- # GH 6270
-
- a = pd.Series([False, False, False, True, True, False, False])
- b = ~a
- c = pd.Series([False] * len(b))
- d = ~c
- methods = {
- "cumsum": np.cumsum,
- "cumprod": np.cumprod,
- "cummin": np.minimum.accumulate,
- "cummax": np.maximum.accumulate,
- }
- args = product((a, b, c, d), methods)
- for s, method in args:
- expected = Series(methods[method](s.values))
- result = getattr(s, method)()
- tm.assert_series_equal(result, expected)
-
- e = pd.Series([False, True, np.nan, False])
- cse = pd.Series([0, 1, np.nan, 1], dtype=object)
- cpe = pd.Series([False, 0, np.nan, 0])
- cmin = pd.Series([False, False, np.nan, False])
- cmax = pd.Series([False, True, np.nan, True])
- expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
-
- for method in methods:
- res = getattr(e, method)()
- tm.assert_series_equal(res, expecteds[method])
-
def test_isin(self):
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
@@ -803,54 +762,6 @@ def test_numpy_repeat(self):
with pytest.raises(ValueError, match=msg):
np.repeat(s, 2, axis=0)
- def test_searchsorted(self):
- s = Series([1, 2, 3])
-
- result = s.searchsorted(1, side="left")
- assert is_scalar(result)
- assert result == 0
-
- result = s.searchsorted(1, side="right")
- assert is_scalar(result)
- assert result == 1
-
- def test_searchsorted_numeric_dtypes_scalar(self):
- s = Series([1, 2, 90, 1000, 3e9])
- r = s.searchsorted(30)
- assert is_scalar(r)
- assert r == 2
-
- r = s.searchsorted([30])
- e = np.array([2], dtype=np.intp)
- tm.assert_numpy_array_equal(r, e)
-
- def test_searchsorted_numeric_dtypes_vector(self):
- s = Series([1, 2, 90, 1000, 3e9])
- r = s.searchsorted([91, 2e6])
- e = np.array([3, 4], dtype=np.intp)
- tm.assert_numpy_array_equal(r, e)
-
- def test_search_sorted_datetime64_scalar(self):
- s = Series(pd.date_range("20120101", periods=10, freq="2D"))
- v = pd.Timestamp("20120102")
- r = s.searchsorted(v)
- assert is_scalar(r)
- assert r == 1
-
- def test_search_sorted_datetime64_list(self):
- s = Series(pd.date_range("20120101", periods=10, freq="2D"))
- v = [pd.Timestamp("20120102"), pd.Timestamp("20120104")]
- r = s.searchsorted(v)
- e = np.array([1, 2], dtype=np.intp)
- tm.assert_numpy_array_equal(r, e)
-
- def test_searchsorted_sorter(self):
- # GH8490
- s = Series([3, 1, 2])
- r = s.searchsorted([0, 3], sorter=np.argsort(s))
- e = np.array([0, 2], dtype=np.intp)
- tm.assert_numpy_array_equal(r, e)
-
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
@@ -1004,117 +915,6 @@ def test_unstack(self):
right.index = pd.MultiIndex.from_tuples(tpls)
tm.assert_frame_equal(ts.unstack(level=0), right)
- def test_value_counts_datetime(self):
- # most dtypes are tested in tests/base
- values = [
- pd.Timestamp("2011-01-01 09:00"),
- pd.Timestamp("2011-01-01 10:00"),
- pd.Timestamp("2011-01-01 11:00"),
- pd.Timestamp("2011-01-01 09:00"),
- pd.Timestamp("2011-01-01 09:00"),
- pd.Timestamp("2011-01-01 11:00"),
- ]
-
- exp_idx = pd.DatetimeIndex(
- ["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"]
- )
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
-
- s = pd.Series(values, name="xxx")
- tm.assert_series_equal(s.value_counts(), exp)
- # check DatetimeIndex outputs the same result
- idx = pd.DatetimeIndex(values, name="xxx")
- tm.assert_series_equal(idx.value_counts(), exp)
-
- # normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
- tm.assert_series_equal(s.value_counts(normalize=True), exp)
- tm.assert_series_equal(idx.value_counts(normalize=True), exp)
-
- def test_value_counts_datetime_tz(self):
- values = [
- pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
- pd.Timestamp("2011-01-01 10:00", tz="US/Eastern"),
- pd.Timestamp("2011-01-01 11:00", tz="US/Eastern"),
- pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
- pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
- pd.Timestamp("2011-01-01 11:00", tz="US/Eastern"),
- ]
-
- exp_idx = pd.DatetimeIndex(
- ["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"],
- tz="US/Eastern",
- )
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
-
- s = pd.Series(values, name="xxx")
- tm.assert_series_equal(s.value_counts(), exp)
- idx = pd.DatetimeIndex(values, name="xxx")
- tm.assert_series_equal(idx.value_counts(), exp)
-
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
- tm.assert_series_equal(s.value_counts(normalize=True), exp)
- tm.assert_series_equal(idx.value_counts(normalize=True), exp)
-
- def test_value_counts_period(self):
- values = [
- pd.Period("2011-01", freq="M"),
- pd.Period("2011-02", freq="M"),
- pd.Period("2011-03", freq="M"),
- pd.Period("2011-01", freq="M"),
- pd.Period("2011-01", freq="M"),
- pd.Period("2011-03", freq="M"),
- ]
-
- exp_idx = pd.PeriodIndex(["2011-01", "2011-03", "2011-02"], freq="M")
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
-
- s = pd.Series(values, name="xxx")
- tm.assert_series_equal(s.value_counts(), exp)
- # check DatetimeIndex outputs the same result
- idx = pd.PeriodIndex(values, name="xxx")
- tm.assert_series_equal(idx.value_counts(), exp)
-
- # normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
- tm.assert_series_equal(s.value_counts(normalize=True), exp)
- tm.assert_series_equal(idx.value_counts(normalize=True), exp)
-
- def test_value_counts_categorical_ordered(self):
- # most dtypes are tested in tests/base
- values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
-
- exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=True)
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
-
- s = pd.Series(values, name="xxx")
- tm.assert_series_equal(s.value_counts(), exp)
- # check CategoricalIndex outputs the same result
- idx = pd.CategoricalIndex(values, name="xxx")
- tm.assert_series_equal(idx.value_counts(), exp)
-
- # normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
- tm.assert_series_equal(s.value_counts(normalize=True), exp)
- tm.assert_series_equal(idx.value_counts(normalize=True), exp)
-
- def test_value_counts_categorical_not_ordered(self):
- values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
-
- exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=False)
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
-
- s = pd.Series(values, name="xxx")
- tm.assert_series_equal(s.value_counts(), exp)
- # check CategoricalIndex outputs the same result
- idx = pd.CategoricalIndex(values, name="xxx")
- tm.assert_series_equal(idx.value_counts(), exp)
-
- # normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
- tm.assert_series_equal(s.value_counts(normalize=True), exp)
- tm.assert_series_equal(idx.value_counts(normalize=True), exp)
-
@pytest.mark.parametrize("func", [np.any, np.all])
@pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())])
@td.skip_if_np_lt("1.15")
@@ -1166,207 +966,6 @@ def test_validate_stat_keepdims(self):
np.sum(s, keepdims=True)
-main_dtypes = [
- "datetime",
- "datetimetz",
- "timedelta",
- "int8",
- "int16",
- "int32",
- "int64",
- "float32",
- "float64",
- "uint8",
- "uint16",
- "uint32",
- "uint64",
-]
-
-
-@pytest.fixture
-def s_main_dtypes():
- """A DataFrame with many dtypes
-
- * datetime
- * datetimetz
- * timedelta
- * [u]int{8,16,32,64}
- * float{32,64}
-
- The columns are the name of the dtype.
- """
- df = pd.DataFrame(
- {
- "datetime": pd.to_datetime(["2003", "2002", "2001", "2002", "2005"]),
- "datetimetz": pd.to_datetime(
- ["2003", "2002", "2001", "2002", "2005"]
- ).tz_localize("US/Eastern"),
- "timedelta": pd.to_timedelta(["3d", "2d", "1d", "2d", "5d"]),
- }
- )
-
- for dtype in [
- "int8",
- "int16",
- "int32",
- "int64",
- "float32",
- "float64",
- "uint8",
- "uint16",
- "uint32",
- "uint64",
- ]:
- df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
-
- return df
-
-
-@pytest.fixture(params=main_dtypes)
-def s_main_dtypes_split(request, s_main_dtypes):
- """Each series in s_main_dtypes."""
- return s_main_dtypes[request.param]
-
-
-def assert_check_nselect_boundary(vals, dtype, method):
- # helper function for 'test_boundary_{dtype}' tests
- s = Series(vals, dtype=dtype)
- result = getattr(s, method)(3)
- expected_idxr = [0, 1, 2] if method == "nsmallest" else [3, 2, 1]
- expected = s.loc[expected_idxr]
- tm.assert_series_equal(result, expected)
-
-
-class TestNLargestNSmallest:
- @pytest.mark.parametrize(
- "r",
- [
- Series([3.0, 2, 1, 2, "5"], dtype="object"),
- Series([3.0, 2, 1, 2, 5], dtype="object"),
- # not supported on some archs
- # Series([3., 2, 1, 2, 5], dtype='complex256'),
- Series([3.0, 2, 1, 2, 5], dtype="complex128"),
- Series(list("abcde")),
- Series(list("abcde"), dtype="category"),
- ],
- )
- def test_error(self, r):
- dt = r.dtype
- msg = "Cannot use method 'n(larg|small)est' with dtype {dt}".format(dt=dt)
- args = 2, len(r), 0, -1
- methods = r.nlargest, r.nsmallest
- for method, arg in product(methods, args):
- with pytest.raises(TypeError, match=msg):
- method(arg)
-
- def test_nsmallest_nlargest(self, s_main_dtypes_split):
- # float, int, datetime64 (use i8), timedelts64 (same),
- # object that are numbers, object that are strings
- s = s_main_dtypes_split
-
- tm.assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
- tm.assert_series_equal(s.nsmallest(2, keep="last"), s.iloc[[2, 3]])
-
- empty = s.iloc[0:0]
- tm.assert_series_equal(s.nsmallest(0), empty)
- tm.assert_series_equal(s.nsmallest(-1), empty)
- tm.assert_series_equal(s.nlargest(0), empty)
- tm.assert_series_equal(s.nlargest(-1), empty)
-
- tm.assert_series_equal(s.nsmallest(len(s)), s.sort_values())
- tm.assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
- tm.assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
- tm.assert_series_equal(s.nlargest(len(s) + 1), s.iloc[[4, 0, 1, 3, 2]])
-
- def test_misc(self):
-
- s = Series([3.0, np.nan, 1, 2, 5])
- tm.assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
- tm.assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
-
- msg = 'keep must be either "first", "last"'
- with pytest.raises(ValueError, match=msg):
- s.nsmallest(keep="invalid")
- with pytest.raises(ValueError, match=msg):
- s.nlargest(keep="invalid")
-
- # GH 15297
- s = Series([1] * 5, index=[1, 2, 3, 4, 5])
- expected_first = Series([1] * 3, index=[1, 2, 3])
- expected_last = Series([1] * 3, index=[5, 4, 3])
-
- result = s.nsmallest(3)
- tm.assert_series_equal(result, expected_first)
-
- result = s.nsmallest(3, keep="last")
- tm.assert_series_equal(result, expected_last)
-
- result = s.nlargest(3)
- tm.assert_series_equal(result, expected_first)
-
- result = s.nlargest(3, keep="last")
- tm.assert_series_equal(result, expected_last)
-
- @pytest.mark.parametrize("n", range(1, 5))
- def test_n(self, n):
-
- # GH 13412
- s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
- result = s.nlargest(n)
- expected = s.sort_values(ascending=False).head(n)
- tm.assert_series_equal(result, expected)
-
- result = s.nsmallest(n)
- expected = s.sort_values().head(n)
- tm.assert_series_equal(result, expected)
-
- def test_boundary_integer(self, nselect_method, any_int_dtype):
- # GH 21426
- dtype_info = np.iinfo(any_int_dtype)
- min_val, max_val = dtype_info.min, dtype_info.max
- vals = [min_val, min_val + 1, max_val - 1, max_val]
- assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
-
- def test_boundary_float(self, nselect_method, float_dtype):
- # GH 21426
- dtype_info = np.finfo(float_dtype)
- min_val, max_val = dtype_info.min, dtype_info.max
- min_2nd, max_2nd = np.nextafter([min_val, max_val], 0, dtype=float_dtype)
- vals = [min_val, min_2nd, max_2nd, max_val]
- assert_check_nselect_boundary(vals, float_dtype, nselect_method)
-
- @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
- def test_boundary_datetimelike(self, nselect_method, dtype):
- # GH 21426
- # use int64 bounds and +1 to min_val since true minimum is NaT
- # (include min_val/NaT at end to maintain same expected_idxr)
- dtype_info = np.iinfo("int64")
- min_val, max_val = dtype_info.min, dtype_info.max
- vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
- assert_check_nselect_boundary(vals, dtype, nselect_method)
-
- def test_duplicate_keep_all_ties(self):
- # see gh-16818
- s = Series([10, 9, 8, 7, 7, 7, 7, 6])
- result = s.nlargest(4, keep="all")
- expected = Series([10, 9, 8, 7, 7, 7, 7])
- tm.assert_series_equal(result, expected)
-
- result = s.nsmallest(2, keep="all")
- expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize(
- "data,expected", [([True, False], [True]), ([True, False, True, True], [True])]
- )
- def test_boolean(self, data, expected):
- # GH 26154 : ensure True > False
- s = Series(data)
- result = s.nlargest(1)
- expected = Series(expected)
- tm.assert_series_equal(result, expected)
-
-
class TestCategoricalSeriesAnalytics:
def test_count(self):
@@ -1378,67 +977,6 @@ def test_count(self):
result = s.count()
assert result == 2
- def test_value_counts(self):
- # GH 12835
- cats = Categorical(list("abcccb"), categories=list("cabd"))
- s = Series(cats, name="xxx")
- res = s.value_counts(sort=False)
-
- exp_index = CategoricalIndex(list("cabd"), categories=cats.categories)
- exp = Series([3, 1, 2, 0], name="xxx", index=exp_index)
- tm.assert_series_equal(res, exp)
-
- res = s.value_counts(sort=True)
-
- exp_index = CategoricalIndex(list("cbad"), categories=cats.categories)
- exp = Series([3, 2, 1, 0], name="xxx", index=exp_index)
- tm.assert_series_equal(res, exp)
-
- # check object dtype handles the Series.name as the same
- # (tested in tests/base)
- s = Series(["a", "b", "c", "c", "c", "b"], name="xxx")
- res = s.value_counts()
- exp = Series([3, 2, 1], name="xxx", index=["c", "b", "a"])
- tm.assert_series_equal(res, exp)
-
- def test_value_counts_with_nan(self):
- # see gh-9443
-
- # sanity check
- s = Series(["a", "b", "a"], dtype="category")
- exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
-
- res = s.value_counts(dropna=True)
- tm.assert_series_equal(res, exp)
-
- res = s.value_counts(dropna=True)
- tm.assert_series_equal(res, exp)
-
- # same Series via two different constructions --> same behaviour
- series = [
- Series(["a", "b", None, "a", None, None], dtype="category"),
- Series(
- Categorical(["a", "b", None, "a", None, None], categories=["a", "b"])
- ),
- ]
-
- for s in series:
- # None is a NaN value, so we exclude its count here
- exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
- res = s.value_counts(dropna=True)
- tm.assert_series_equal(res, exp)
-
- # we don't exclude the count of None and sort by counts
- exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
- res = s.value_counts(dropna=False)
- tm.assert_series_equal(res, exp)
-
- # When we aren't sorting by counts, and np.nan isn't a
- # category, it should be last.
- exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
- res = s.value_counts(dropna=False, sort=False)
- tm.assert_series_equal(res, exp)
-
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"],
diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py
index a31cc9d968f3a..0fac279291c66 100644
--- a/pandas/tests/series/test_cumulative.py
+++ b/pandas/tests/series/test_cumulative.py
@@ -5,6 +5,8 @@
--------
tests.frame.test_cumulative
"""
+from itertools import product
+
import numpy as np
import pytest
@@ -140,3 +142,33 @@ def test_cummax_timedelta64(self):
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
+
+ def test_cummethods_bool(self):
+ # GH#6270
+
+ a = pd.Series([False, False, False, True, True, False, False])
+ b = ~a
+ c = pd.Series([False] * len(b))
+ d = ~c
+ methods = {
+ "cumsum": np.cumsum,
+ "cumprod": np.cumprod,
+ "cummin": np.minimum.accumulate,
+ "cummax": np.maximum.accumulate,
+ }
+ args = product((a, b, c, d), methods)
+ for s, method in args:
+ expected = pd.Series(methods[method](s.values))
+ result = getattr(s, method)()
+ tm.assert_series_equal(result, expected)
+
+ e = pd.Series([False, True, np.nan, False])
+ cse = pd.Series([0, 1, np.nan, 1], dtype=object)
+ cpe = pd.Series([False, 0, np.nan, 0])
+ cmin = pd.Series([False, False, np.nan, False])
+ cmax = pd.Series([False, True, np.nan, True])
+ expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
+
+ for method in methods:
+ res = getattr(e, method)()
+ tm.assert_series_equal(res, expecteds[method])
| In parallel with, and orthogonal to, #30381.
Identified a couple of tests that were in the wrong places, e.g. the one moved int test_cumulative. | https://api.github.com/repos/pandas-dev/pandas/pulls/30385 | 2019-12-20T23:10:46Z | 2019-12-23T15:13:08Z | 2019-12-23T15:13:08Z | 2019-12-23T15:37:44Z |
CLN: DataFrameGroupBy._cython_agg_general | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b2543289f68c6..dc343f670b725 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -11,6 +11,7 @@
from textwrap import dedent
import typing
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
FrozenSet,
@@ -68,6 +69,10 @@
from pandas.plotting import boxplot_frame_groupby
+if TYPE_CHECKING:
+ from pandas.core.internals import Block
+
+
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
@@ -987,11 +992,11 @@ def _iterate_slices(self) -> Iterable[Series]:
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
- ):
- new_items, new_blocks = self._cython_agg_blocks(
+ ) -> DataFrame:
+ agg_items, agg_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count
)
- return self._wrap_agged_blocks(new_items, new_blocks)
+ return self._wrap_agged_blocks(agg_blocks, items=agg_items)
def _cython_agg_blocks(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
@@ -1691,17 +1696,17 @@ def _wrap_transformed_output(
return result
- def _wrap_agged_blocks(self, items, blocks):
+ def _wrap_agged_blocks(self, blocks: "Sequence[Block]", items: Index) -> DataFrame:
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
- mgr = BlockManager(blocks, [items, index])
+ mgr = BlockManager(blocks, axes=[items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
- mgr = BlockManager(blocks, [items, index])
+ mgr = BlockManager(blocks, axes=[items, index])
result = DataFrame(mgr)
if self.axis == 1:
@@ -1740,18 +1745,18 @@ def count(self):
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
- val = (
+ vals = (
(mask & ~_isna_ndarraylike(np.atleast_2d(blk.get_values())))
for blk in data.blocks
)
- loc = (blk.mgr_locs for blk in data.blocks)
+ locs = (blk.mgr_locs for blk in data.blocks)
- counted = [
- lib.count_level_2d(x, labels=ids, max_bin=ngroups, axis=1) for x in val
- ]
- blk = map(make_block, counted, loc)
+ counted = (
+ lib.count_level_2d(x, labels=ids, max_bin=ngroups, axis=1) for x in vals
+ )
+ blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)]
- return self._wrap_agged_blocks(data.items, list(blk))
+ return self._wrap_agged_blocks(blocks, items=data.items)
def nunique(self, dropna: bool = True):
"""
| Some small cleanups that make the code easier to read IMO. | https://api.github.com/repos/pandas-dev/pandas/pulls/30384 | 2019-12-20T22:48:09Z | 2019-12-24T14:42:07Z | 2019-12-24T14:42:07Z | 2019-12-24T14:42:10Z |
CLN: move code out of a try clause in merge.py | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 0fb029c8429a6..3dfd5fed34741 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -116,20 +116,20 @@ def _groupby_and_merge(
# if we can groupby the rhs
# then we can get vastly better perf
- try:
- # we will check & remove duplicates if indicated
- if check_duplicates:
- if on is None:
- on = []
- elif not isinstance(on, (list, tuple)):
- on = [on]
-
- if right.duplicated(by + on).any():
- _right = right.drop_duplicates(by + on, keep="last")
- # TODO: use overload to refine return type of drop_duplicates
- assert _right is not None # needed for mypy
- right = _right
+ # we will check & remove duplicates if indicated
+ if check_duplicates:
+ if on is None:
+ on = []
+ elif not isinstance(on, (list, tuple)):
+ on = [on]
+
+ if right.duplicated(by + on).any():
+ _right = right.drop_duplicates(by + on, keep="last")
+ # TODO: use overload to refine return type of drop_duplicates
+ assert _right is not None # needed for mypy
+ right = _right
+ try:
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
| Minor clean-up. | https://api.github.com/repos/pandas-dev/pandas/pulls/30382 | 2019-12-20T22:04:23Z | 2019-12-21T19:55:52Z | 2019-12-21T19:55:52Z | 2019-12-21T20:01:21Z |
REF: targeted test files for clip, isin, describe, transpose | diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py
new file mode 100644
index 0000000000000..48444e909ee01
--- /dev/null
+++ b/pandas/tests/frame/methods/test_clip.py
@@ -0,0 +1,157 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series
+import pandas.util.testing as tm
+
+
+class TestDataFrameClip:
+ def test_clip(self, float_frame):
+ median = float_frame.median().median()
+ original = float_frame.copy()
+
+ double = float_frame.clip(upper=median, lower=median)
+ assert not (double.values != median).any()
+
+ # Verify that float_frame was not changed inplace
+ assert (float_frame.values == original.values).all()
+
+ def test_inplace_clip(self, float_frame):
+ # GH#15388
+ median = float_frame.median().median()
+ frame_copy = float_frame.copy()
+
+ frame_copy.clip(upper=median, lower=median, inplace=True)
+ assert not (frame_copy.values != median).any()
+
+ def test_dataframe_clip(self):
+ # GH#2747
+ df = DataFrame(np.random.randn(1000, 2))
+
+ for lb, ub in [(-1, 1), (1, -1)]:
+ clipped_df = df.clip(lb, ub)
+
+ lb, ub = min(lb, ub), max(ub, lb)
+ lb_mask = df.values <= lb
+ ub_mask = df.values >= ub
+ mask = ~lb_mask & ~ub_mask
+ assert (clipped_df.values[lb_mask] == lb).all()
+ assert (clipped_df.values[ub_mask] == ub).all()
+ assert (clipped_df.values[mask] == df.values[mask]).all()
+
+ def test_clip_mixed_numeric(self):
+ # TODO(jreback)
+ # clip on mixed integer or floats
+ # with integer clippers coerces to float
+ df = DataFrame({"A": [1, 2, 3], "B": [1.0, np.nan, 3.0]})
+ result = df.clip(1, 2)
+ expected = DataFrame({"A": [1, 2, 2], "B": [1.0, np.nan, 2.0]})
+ tm.assert_frame_equal(result, expected, check_like=True)
+
+ # GH#24162, clipping now preserves numeric types per column
+ df = DataFrame([[1, 2, 3.4], [3, 4, 5.6]], columns=["foo", "bar", "baz"])
+ expected = df.dtypes
+ result = df.clip(upper=3).dtypes
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("inplace", [True, False])
+ def test_clip_against_series(self, inplace):
+ # GH#6966
+
+ df = DataFrame(np.random.randn(1000, 2))
+ lb = Series(np.random.randn(1000))
+ ub = lb + 1
+
+ original = df.copy()
+ clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
+
+ if inplace:
+ clipped_df = df
+
+ for i in range(2):
+ lb_mask = original.iloc[:, i] <= lb
+ ub_mask = original.iloc[:, i] >= ub
+ mask = ~lb_mask & ~ub_mask
+
+ result = clipped_df.loc[lb_mask, i]
+ tm.assert_series_equal(result, lb[lb_mask], check_names=False)
+ assert result.name == i
+
+ result = clipped_df.loc[ub_mask, i]
+ tm.assert_series_equal(result, ub[ub_mask], check_names=False)
+ assert result.name == i
+
+ tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
+
+ @pytest.mark.parametrize("inplace", [True, False])
+ @pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
+ @pytest.mark.parametrize(
+ "axis,res",
+ [
+ (0, [[2.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 7.0, 7.0]]),
+ (1, [[2.0, 3.0, 4.0], [4.0, 5.0, 6.0], [5.0, 6.0, 7.0]]),
+ ],
+ )
+ def test_clip_against_list_like(self, simple_frame, inplace, lower, axis, res):
+ # GH#15390
+ original = simple_frame.copy(deep=True)
+
+ result = original.clip(lower=lower, upper=[5, 6, 7], axis=axis, inplace=inplace)
+
+ expected = pd.DataFrame(res, columns=original.columns, index=original.index)
+ if inplace:
+ result = original
+ tm.assert_frame_equal(result, expected, check_exact=True)
+
+ @pytest.mark.parametrize("axis", [0, 1, None])
+ def test_clip_against_frame(self, axis):
+ df = DataFrame(np.random.randn(1000, 2))
+ lb = DataFrame(np.random.randn(1000, 2))
+ ub = lb + 1
+
+ clipped_df = df.clip(lb, ub, axis=axis)
+
+ lb_mask = df <= lb
+ ub_mask = df >= ub
+ mask = ~lb_mask & ~ub_mask
+
+ tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
+ tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
+ tm.assert_frame_equal(clipped_df[mask], df[mask])
+
+ def test_clip_against_unordered_columns(self):
+ # GH#20911
+ df1 = DataFrame(np.random.randn(1000, 4), columns=["A", "B", "C", "D"])
+ df2 = DataFrame(np.random.randn(1000, 4), columns=["D", "A", "B", "C"])
+ df3 = DataFrame(df2.values - 1, columns=["B", "D", "C", "A"])
+ result_upper = df1.clip(lower=0, upper=df2)
+ expected_upper = df1.clip(lower=0, upper=df2[df1.columns])
+ result_lower = df1.clip(lower=df3, upper=3)
+ expected_lower = df1.clip(lower=df3[df1.columns], upper=3)
+ result_lower_upper = df1.clip(lower=df3, upper=df2)
+ expected_lower_upper = df1.clip(lower=df3[df1.columns], upper=df2[df1.columns])
+ tm.assert_frame_equal(result_upper, expected_upper)
+ tm.assert_frame_equal(result_lower, expected_lower)
+ tm.assert_frame_equal(result_lower_upper, expected_lower_upper)
+
+ def test_clip_with_na_args(self, float_frame):
+ """Should process np.nan argument as None """
+ # GH#17276
+ tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
+ tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame)
+
+ # GH#19992
+ df = DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]})
+
+ result = df.clip(lower=[4, 5, np.nan], axis=0)
+ expected = DataFrame(
+ {"col_0": [4, 5, np.nan], "col_1": [4, 5, np.nan], "col_2": [7, 8, np.nan]}
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.clip(lower=[4, 5, np.nan], axis=1)
+ expected = DataFrame(
+ {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [np.nan, np.nan, np.nan]}
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
new file mode 100644
index 0000000000000..09510fc931546
--- /dev/null
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -0,0 +1,333 @@
+import numpy as np
+
+import pandas as pd
+from pandas import Categorical, DataFrame, Series, Timestamp, date_range
+import pandas.util.testing as tm
+
+
+class TestDataFrameDescribe:
+ def test_describe_bool_in_mixed_frame(self):
+ df = DataFrame(
+ {
+ "string_data": ["a", "b", "c", "d", "e"],
+ "bool_data": [True, True, False, False, False],
+ "int_data": [10, 20, 30, 40, 50],
+ }
+ )
+
+ # Integer data are included in .describe() output,
+ # Boolean and string data are not.
+ result = df.describe()
+ expected = DataFrame(
+ {"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # Top value is a boolean value that is False
+ result = df.describe(include=["bool"])
+
+ expected = DataFrame(
+ {"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_describe_empty_object(self):
+ # GH#27183
+ df = pd.DataFrame({"A": [None, None]}, dtype=object)
+ result = df.describe()
+ expected = pd.DataFrame(
+ {"A": [0, 0, np.nan, np.nan]},
+ dtype=object,
+ index=["count", "unique", "top", "freq"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[:0].describe()
+ tm.assert_frame_equal(result, expected)
+
+ def test_describe_bool_frame(self):
+ # GH#13891
+ df = pd.DataFrame(
+ {
+ "bool_data_1": [False, False, True, True],
+ "bool_data_2": [False, True, True, True],
+ }
+ )
+ result = df.describe()
+ expected = DataFrame(
+ {"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]},
+ index=["count", "unique", "top", "freq"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ df = pd.DataFrame(
+ {
+ "bool_data": [False, False, True, True, False],
+ "int_data": [0, 1, 2, 3, 4],
+ }
+ )
+ result = df.describe()
+ expected = DataFrame(
+ {"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ df = pd.DataFrame(
+ {"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
+ )
+ result = df.describe()
+ expected = DataFrame(
+ {"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]},
+ index=["count", "unique", "top", "freq"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_describe_categorical(self):
+ df = DataFrame({"value": np.random.randint(0, 10000, 100)})
+ labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ cat_labels = Categorical(labels, labels)
+
+ df = df.sort_values(by=["value"], ascending=True)
+ df["value_group"] = pd.cut(
+ df.value, range(0, 10500, 500), right=False, labels=cat_labels
+ )
+ cat = df
+
+ # Categoricals should not show up together with numerical columns
+ result = cat.describe()
+ assert len(result.columns) == 1
+
+ # In a frame, describe() for the cat should be the same as for string
+ # arrays (count, unique, top, freq)
+
+ cat = Categorical(
+ ["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
+ )
+ s = Series(cat)
+ result = s.describe()
+ expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
+ tm.assert_series_equal(result, expected)
+
+ cat = Series(Categorical(["a", "b", "c", "c"]))
+ df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
+ result = df3.describe()
+ tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
+
+ def test_describe_empty_categorical_column(self):
+ # GH#26397
+ # Ensure the index of an an empty categorical DataFrame column
+ # also contains (count, unique, top, freq)
+ df = pd.DataFrame({"empty_col": Categorical([])})
+ result = df.describe()
+ expected = DataFrame(
+ {"empty_col": [0, 0, np.nan, np.nan]},
+ index=["count", "unique", "top", "freq"],
+ dtype="object",
+ )
+ tm.assert_frame_equal(result, expected)
+ # ensure NaN, not None
+ assert np.isnan(result.iloc[2, 0])
+ assert np.isnan(result.iloc[3, 0])
+
+ def test_describe_categorical_columns(self):
+ # GH#11558
+ columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
+ df = DataFrame(
+ {
+ "int1": [10, 20, 30, 40, 50],
+ "int2": [10, 20, 30, 40, 50],
+ "obj": ["A", 0, None, "X", 1],
+ },
+ columns=columns,
+ )
+ result = df.describe()
+
+ exp_columns = pd.CategoricalIndex(
+ ["int1", "int2"],
+ categories=["int1", "int2", "obj"],
+ ordered=True,
+ name="XXX",
+ )
+ expected = DataFrame(
+ {
+ "int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
+ "int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
+ },
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ columns=exp_columns,
+ )
+
+ tm.assert_frame_equal(result, expected)
+ tm.assert_categorical_equal(result.columns.values, expected.columns.values)
+
+ def test_describe_datetime_columns(self):
+ columns = pd.DatetimeIndex(
+ ["2011-01-01", "2011-02-01", "2011-03-01"],
+ freq="MS",
+ tz="US/Eastern",
+ name="XXX",
+ )
+ df = DataFrame(
+ {
+ 0: [10, 20, 30, 40, 50],
+ 1: [10, 20, 30, 40, 50],
+ 2: ["A", 0, None, "X", 1],
+ }
+ )
+ df.columns = columns
+ result = df.describe()
+
+ exp_columns = pd.DatetimeIndex(
+ ["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
+ )
+ expected = DataFrame(
+ {
+ 0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
+ 1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
+ },
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ )
+ expected.columns = exp_columns
+ tm.assert_frame_equal(result, expected)
+ assert result.columns.freq == "MS"
+ assert result.columns.tz == expected.columns.tz
+
+ def test_describe_timedelta_values(self):
+ # GH#6145
+ t1 = pd.timedelta_range("1 days", freq="D", periods=5)
+ t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
+ df = pd.DataFrame({"t1": t1, "t2": t2})
+
+ expected = DataFrame(
+ {
+ "t1": [
+ 5,
+ pd.Timedelta("3 days"),
+ df.iloc[:, 0].std(),
+ pd.Timedelta("1 days"),
+ pd.Timedelta("2 days"),
+ pd.Timedelta("3 days"),
+ pd.Timedelta("4 days"),
+ pd.Timedelta("5 days"),
+ ],
+ "t2": [
+ 5,
+ pd.Timedelta("3 hours"),
+ df.iloc[:, 1].std(),
+ pd.Timedelta("1 hours"),
+ pd.Timedelta("2 hours"),
+ pd.Timedelta("3 hours"),
+ pd.Timedelta("4 hours"),
+ pd.Timedelta("5 hours"),
+ ],
+ },
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ )
+
+ result = df.describe()
+ tm.assert_frame_equal(result, expected)
+
+ exp_repr = (
+ " t1 t2\n"
+ "count 5 5\n"
+ "mean 3 days 00:00:00 0 days 03:00:00\n"
+ "std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
+ "min 1 days 00:00:00 0 days 01:00:00\n"
+ "25% 2 days 00:00:00 0 days 02:00:00\n"
+ "50% 3 days 00:00:00 0 days 03:00:00\n"
+ "75% 4 days 00:00:00 0 days 04:00:00\n"
+ "max 5 days 00:00:00 0 days 05:00:00"
+ )
+ assert repr(result) == exp_repr
+
+ def test_describe_tz_values(self, tz_naive_fixture):
+ # GH#21332
+ tz = tz_naive_fixture
+ s1 = Series(range(5))
+ start = Timestamp(2018, 1, 1)
+ end = Timestamp(2018, 1, 5)
+ s2 = Series(date_range(start, end, tz=tz))
+ df = pd.DataFrame({"s1": s1, "s2": s2})
+
+ expected = DataFrame(
+ {
+ "s1": [
+ 5,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ 2,
+ 1.581139,
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ ],
+ "s2": [
+ 5,
+ 5,
+ s2.value_counts().index[0],
+ 1,
+ start.tz_localize(tz),
+ end.tz_localize(tz),
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ ],
+ },
+ index=[
+ "count",
+ "unique",
+ "top",
+ "freq",
+ "first",
+ "last",
+ "mean",
+ "std",
+ "min",
+ "25%",
+ "50%",
+ "75%",
+ "max",
+ ],
+ )
+ result = df.describe(include="all")
+ tm.assert_frame_equal(result, expected)
+
+ def test_describe_percentiles_integer_idx(self):
+ # GH#26660
+ df = pd.DataFrame({"x": [1]})
+ pct = np.linspace(0, 1, 10 + 1)
+ result = df.describe(percentiles=pct)
+
+ expected = DataFrame(
+ {"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]},
+ index=[
+ "count",
+ "mean",
+ "std",
+ "min",
+ "0%",
+ "10%",
+ "20%",
+ "30%",
+ "40%",
+ "50%",
+ "60%",
+ "70%",
+ "80%",
+ "90%",
+ "100%",
+ "max",
+ ],
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
new file mode 100644
index 0000000000000..5d7dc5c843ec1
--- /dev/null
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -0,0 +1,186 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, MultiIndex, Series
+import pandas.util.testing as tm
+
+
+class TestDataFrameIsIn:
+ def test_isin(self):
+ # GH#4211
+ df = DataFrame(
+ {
+ "vals": [1, 2, 3, 4],
+ "ids": ["a", "b", "f", "n"],
+ "ids2": ["a", "n", "c", "n"],
+ },
+ index=["foo", "bar", "baz", "qux"],
+ )
+ other = ["a", "b", "c"]
+
+ result = df.isin(other)
+ expected = DataFrame([df.loc[s].isin(other) for s in df.index])
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
+ def test_isin_empty(self, empty):
+ # GH#16991
+ df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
+ expected = DataFrame(False, df.index, df.columns)
+
+ result = df.isin(empty)
+ tm.assert_frame_equal(result, expected)
+
+ def test_isin_dict(self):
+ df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
+ d = {"A": ["a"]}
+
+ expected = DataFrame(False, df.index, df.columns)
+ expected.loc[0, "A"] = True
+
+ result = df.isin(d)
+ tm.assert_frame_equal(result, expected)
+
+ # non unique columns
+ df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
+ df.columns = ["A", "A"]
+ expected = DataFrame(False, df.index, df.columns)
+ expected.loc[0, "A"] = True
+ result = df.isin(d)
+ tm.assert_frame_equal(result, expected)
+
+ def test_isin_with_string_scalar(self):
+ # GH#4763
+ df = DataFrame(
+ {
+ "vals": [1, 2, 3, 4],
+ "ids": ["a", "b", "f", "n"],
+ "ids2": ["a", "n", "c", "n"],
+ },
+ index=["foo", "bar", "baz", "qux"],
+ )
+ with pytest.raises(TypeError):
+ df.isin("a")
+
+ with pytest.raises(TypeError):
+ df.isin("aaa")
+
+ def test_isin_df(self):
+ df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
+ df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
+ expected = DataFrame(False, df1.index, df1.columns)
+ result = df1.isin(df2)
+ expected["A"].loc[[1, 3]] = True
+ expected["B"].loc[[0, 2]] = True
+ tm.assert_frame_equal(result, expected)
+
+ # partial overlapping columns
+ df2.columns = ["A", "C"]
+ result = df1.isin(df2)
+ expected["B"] = False
+ tm.assert_frame_equal(result, expected)
+
+ def test_isin_tuples(self):
+ # GH#16394
+ df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
+ df["C"] = list(zip(df["A"], df["B"]))
+ result = df["C"].isin([(1, "a")])
+ tm.assert_series_equal(result, Series([True, False, False], name="C"))
+
+ def test_isin_df_dupe_values(self):
+ df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
+ # just cols duped
+ df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
+ with pytest.raises(ValueError):
+ df1.isin(df2)
+
+ # just index duped
+ df2 = DataFrame(
+ [[0, 2], [12, 4], [2, np.nan], [4, 5]],
+ columns=["A", "B"],
+ index=[0, 0, 1, 1],
+ )
+ with pytest.raises(ValueError):
+ df1.isin(df2)
+
+ # cols and index:
+ df2.columns = ["B", "B"]
+ with pytest.raises(ValueError):
+ df1.isin(df2)
+
+ def test_isin_dupe_self(self):
+ other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
+ df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
+ result = df.isin(other)
+ expected = DataFrame(False, index=df.index, columns=df.columns)
+ expected.loc[0] = True
+ expected.iloc[1, 1] = True
+ tm.assert_frame_equal(result, expected)
+
+ def test_isin_against_series(self):
+ df = pd.DataFrame(
+ {"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
+ )
+ s = pd.Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
+ expected = DataFrame(False, index=df.index, columns=df.columns)
+ expected["A"].loc["a"] = True
+ expected.loc["d"] = True
+ result = df.isin(s)
+ tm.assert_frame_equal(result, expected)
+
+ def test_isin_multiIndex(self):
+ idx = MultiIndex.from_tuples(
+ [
+ (0, "a", "foo"),
+ (0, "a", "bar"),
+ (0, "b", "bar"),
+ (0, "b", "baz"),
+ (2, "a", "foo"),
+ (2, "a", "bar"),
+ (2, "c", "bar"),
+ (2, "c", "baz"),
+ (1, "b", "foo"),
+ (1, "b", "bar"),
+ (1, "c", "bar"),
+ (1, "c", "baz"),
+ ]
+ )
+ df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
+ df2 = DataFrame(
+ {
+ "A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
+ "B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
+ }
+ )
+ # against regular index
+ expected = DataFrame(False, index=df1.index, columns=df1.columns)
+ result = df1.isin(df2)
+ tm.assert_frame_equal(result, expected)
+
+ df2.index = idx
+ expected = df2.values.astype(np.bool)
+ expected[:, 1] = ~expected[:, 1]
+ expected = DataFrame(expected, columns=["A", "B"], index=idx)
+
+ result = df1.isin(df2)
+ tm.assert_frame_equal(result, expected)
+
+ def test_isin_empty_datetimelike(self):
+ # GH#15473
+ df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
+ df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
+ df2 = DataFrame({"date": []})
+ df3 = DataFrame()
+
+ expected = DataFrame({"date": [False, False]})
+
+ result = df1_ts.isin(df2)
+ tm.assert_frame_equal(result, expected)
+ result = df1_ts.isin(df3)
+ tm.assert_frame_equal(result, expected)
+
+ result = df1_td.isin(df2)
+ tm.assert_frame_equal(result, expected)
+ result = df1_td.isin(df3)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py
new file mode 100644
index 0000000000000..71843053cf3a8
--- /dev/null
+++ b/pandas/tests/frame/methods/test_transpose.py
@@ -0,0 +1,43 @@
+import pandas as pd
+import pandas.util.testing as tm
+
+
+class TestTranspose:
+ def test_transpose_tzaware_1col_single_tz(self):
+ # GH#26825
+ dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
+
+ df = pd.DataFrame(dti)
+ assert (df.dtypes == dti.dtype).all()
+ res = df.T
+ assert (res.dtypes == dti.dtype).all()
+
+ def test_transpose_tzaware_2col_single_tz(self):
+ # GH#26825
+ dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
+
+ df3 = pd.DataFrame({"A": dti, "B": dti})
+ assert (df3.dtypes == dti.dtype).all()
+ res3 = df3.T
+ assert (res3.dtypes == dti.dtype).all()
+
+ def test_transpose_tzaware_2col_mixed_tz(self):
+ # GH#26825
+ dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
+ dti2 = dti.tz_convert("US/Pacific")
+
+ df4 = pd.DataFrame({"A": dti, "B": dti2})
+ assert (df4.dtypes == [dti.dtype, dti2.dtype]).all()
+ assert (df4.T.dtypes == object).all()
+ tm.assert_frame_equal(df4.T.T, df4)
+
+ def test_transpose_object_to_tzaware_mixed_tz(self):
+ # GH#26825
+ dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
+ dti2 = dti.tz_convert("US/Pacific")
+
+ # mixed all-tzaware dtypes
+ df2 = pd.DataFrame([dti, dti2])
+ assert (df2.dtypes == object).all()
+ res2 = df2.T
+ assert (res2.dtypes == [dti.dtype, dti2.dtype]).all()
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index ee9329da4e5e1..1a241cd72ec43 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -530,335 +530,6 @@ def test_corrwith_kendall(self):
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
- # ---------------------------------------------------------------------
- # Describe
-
- def test_bool_describe_in_mixed_frame(self):
- df = DataFrame(
- {
- "string_data": ["a", "b", "c", "d", "e"],
- "bool_data": [True, True, False, False, False],
- "int_data": [10, 20, 30, 40, 50],
- }
- )
-
- # Integer data are included in .describe() output,
- # Boolean and string data are not.
- result = df.describe()
- expected = DataFrame(
- {"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
- index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
- )
- tm.assert_frame_equal(result, expected)
-
- # Top value is a boolean value that is False
- result = df.describe(include=["bool"])
-
- expected = DataFrame(
- {"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
- )
- tm.assert_frame_equal(result, expected)
-
- def test_describe_empty_object(self):
- # https://github.com/pandas-dev/pandas/issues/27183
- df = pd.DataFrame({"A": [None, None]}, dtype=object)
- result = df.describe()
- expected = pd.DataFrame(
- {"A": [0, 0, np.nan, np.nan]},
- dtype=object,
- index=["count", "unique", "top", "freq"],
- )
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[:0].describe()
- tm.assert_frame_equal(result, expected)
-
- def test_describe_bool_frame(self):
- # GH 13891
- df = pd.DataFrame(
- {
- "bool_data_1": [False, False, True, True],
- "bool_data_2": [False, True, True, True],
- }
- )
- result = df.describe()
- expected = DataFrame(
- {"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]},
- index=["count", "unique", "top", "freq"],
- )
- tm.assert_frame_equal(result, expected)
-
- df = pd.DataFrame(
- {
- "bool_data": [False, False, True, True, False],
- "int_data": [0, 1, 2, 3, 4],
- }
- )
- result = df.describe()
- expected = DataFrame(
- {"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
- index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
- )
- tm.assert_frame_equal(result, expected)
-
- df = pd.DataFrame(
- {"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
- )
- result = df.describe()
- expected = DataFrame(
- {"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]},
- index=["count", "unique", "top", "freq"],
- )
- tm.assert_frame_equal(result, expected)
-
- def test_describe_categorical(self):
- df = DataFrame({"value": np.random.randint(0, 10000, 100)})
- labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
- cat_labels = Categorical(labels, labels)
-
- df = df.sort_values(by=["value"], ascending=True)
- df["value_group"] = pd.cut(
- df.value, range(0, 10500, 500), right=False, labels=cat_labels
- )
- cat = df
-
- # Categoricals should not show up together with numerical columns
- result = cat.describe()
- assert len(result.columns) == 1
-
- # In a frame, describe() for the cat should be the same as for string
- # arrays (count, unique, top, freq)
-
- cat = Categorical(
- ["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
- )
- s = Series(cat)
- result = s.describe()
- expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
- tm.assert_series_equal(result, expected)
-
- cat = Series(Categorical(["a", "b", "c", "c"]))
- df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
- result = df3.describe()
- tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
-
- def test_describe_empty_categorical_column(self):
- # GH 26397
- # Ensure the index of an an empty categorical DataFrame column
- # also contains (count, unique, top, freq)
- df = pd.DataFrame({"empty_col": Categorical([])})
- result = df.describe()
- expected = DataFrame(
- {"empty_col": [0, 0, np.nan, np.nan]},
- index=["count", "unique", "top", "freq"],
- dtype="object",
- )
- tm.assert_frame_equal(result, expected)
- # ensure NaN, not None
- assert np.isnan(result.iloc[2, 0])
- assert np.isnan(result.iloc[3, 0])
-
- def test_describe_categorical_columns(self):
- # GH 11558
- columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
- df = DataFrame(
- {
- "int1": [10, 20, 30, 40, 50],
- "int2": [10, 20, 30, 40, 50],
- "obj": ["A", 0, None, "X", 1],
- },
- columns=columns,
- )
- result = df.describe()
-
- exp_columns = pd.CategoricalIndex(
- ["int1", "int2"],
- categories=["int1", "int2", "obj"],
- ordered=True,
- name="XXX",
- )
- expected = DataFrame(
- {
- "int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
- "int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
- },
- index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
- columns=exp_columns,
- )
-
- tm.assert_frame_equal(result, expected)
- tm.assert_categorical_equal(result.columns.values, expected.columns.values)
-
- def test_describe_datetime_columns(self):
- columns = pd.DatetimeIndex(
- ["2011-01-01", "2011-02-01", "2011-03-01"],
- freq="MS",
- tz="US/Eastern",
- name="XXX",
- )
- df = DataFrame(
- {
- 0: [10, 20, 30, 40, 50],
- 1: [10, 20, 30, 40, 50],
- 2: ["A", 0, None, "X", 1],
- }
- )
- df.columns = columns
- result = df.describe()
-
- exp_columns = pd.DatetimeIndex(
- ["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
- )
- expected = DataFrame(
- {
- 0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
- 1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
- },
- index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
- )
- expected.columns = exp_columns
- tm.assert_frame_equal(result, expected)
- assert result.columns.freq == "MS"
- assert result.columns.tz == expected.columns.tz
-
- def test_describe_timedelta_values(self):
- # GH 6145
- t1 = pd.timedelta_range("1 days", freq="D", periods=5)
- t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
- df = pd.DataFrame({"t1": t1, "t2": t2})
-
- expected = DataFrame(
- {
- "t1": [
- 5,
- pd.Timedelta("3 days"),
- df.iloc[:, 0].std(),
- pd.Timedelta("1 days"),
- pd.Timedelta("2 days"),
- pd.Timedelta("3 days"),
- pd.Timedelta("4 days"),
- pd.Timedelta("5 days"),
- ],
- "t2": [
- 5,
- pd.Timedelta("3 hours"),
- df.iloc[:, 1].std(),
- pd.Timedelta("1 hours"),
- pd.Timedelta("2 hours"),
- pd.Timedelta("3 hours"),
- pd.Timedelta("4 hours"),
- pd.Timedelta("5 hours"),
- ],
- },
- index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
- )
-
- result = df.describe()
- tm.assert_frame_equal(result, expected)
-
- exp_repr = (
- " t1 t2\n"
- "count 5 5\n"
- "mean 3 days 00:00:00 0 days 03:00:00\n"
- "std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
- "min 1 days 00:00:00 0 days 01:00:00\n"
- "25% 2 days 00:00:00 0 days 02:00:00\n"
- "50% 3 days 00:00:00 0 days 03:00:00\n"
- "75% 4 days 00:00:00 0 days 04:00:00\n"
- "max 5 days 00:00:00 0 days 05:00:00"
- )
- assert repr(result) == exp_repr
-
- def test_describe_tz_values(self, tz_naive_fixture):
- # GH 21332
- tz = tz_naive_fixture
- s1 = Series(range(5))
- start = Timestamp(2018, 1, 1)
- end = Timestamp(2018, 1, 5)
- s2 = Series(date_range(start, end, tz=tz))
- df = pd.DataFrame({"s1": s1, "s2": s2})
-
- expected = DataFrame(
- {
- "s1": [
- 5,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- 2,
- 1.581139,
- 0,
- 1,
- 2,
- 3,
- 4,
- ],
- "s2": [
- 5,
- 5,
- s2.value_counts().index[0],
- 1,
- start.tz_localize(tz),
- end.tz_localize(tz),
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- ],
- },
- index=[
- "count",
- "unique",
- "top",
- "freq",
- "first",
- "last",
- "mean",
- "std",
- "min",
- "25%",
- "50%",
- "75%",
- "max",
- ],
- )
- result = df.describe(include="all")
- tm.assert_frame_equal(result, expected)
-
- def test_describe_percentiles_integer_idx(self):
- # Issue 26660
- df = pd.DataFrame({"x": [1]})
- pct = np.linspace(0, 1, 10 + 1)
- result = df.describe(percentiles=pct)
-
- expected = DataFrame(
- {"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]},
- index=[
- "count",
- "mean",
- "std",
- "min",
- "0%",
- "10%",
- "20%",
- "30%",
- "40%",
- "50%",
- "60%",
- "70%",
- "80%",
- "90%",
- "100%",
- "max",
- ],
- )
- tm.assert_frame_equal(result, expected)
-
# ---------------------------------------------------------------------
# Reductions
@@ -1781,187 +1452,6 @@ def test_any_all_level_axis_none_raises(self, method):
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level="out")
- # ----------------------------------------------------------------------
- # Isin
-
- def test_isin(self):
- # GH 4211
- df = DataFrame(
- {
- "vals": [1, 2, 3, 4],
- "ids": ["a", "b", "f", "n"],
- "ids2": ["a", "n", "c", "n"],
- },
- index=["foo", "bar", "baz", "qux"],
- )
- other = ["a", "b", "c"]
-
- result = df.isin(other)
- expected = DataFrame([df.loc[s].isin(other) for s in df.index])
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
- def test_isin_empty(self, empty):
- # GH 16991
- df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
- expected = DataFrame(False, df.index, df.columns)
-
- result = df.isin(empty)
- tm.assert_frame_equal(result, expected)
-
- def test_isin_dict(self):
- df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
- d = {"A": ["a"]}
-
- expected = DataFrame(False, df.index, df.columns)
- expected.loc[0, "A"] = True
-
- result = df.isin(d)
- tm.assert_frame_equal(result, expected)
-
- # non unique columns
- df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
- df.columns = ["A", "A"]
- expected = DataFrame(False, df.index, df.columns)
- expected.loc[0, "A"] = True
- result = df.isin(d)
- tm.assert_frame_equal(result, expected)
-
- def test_isin_with_string_scalar(self):
- # GH 4763
- df = DataFrame(
- {
- "vals": [1, 2, 3, 4],
- "ids": ["a", "b", "f", "n"],
- "ids2": ["a", "n", "c", "n"],
- },
- index=["foo", "bar", "baz", "qux"],
- )
- with pytest.raises(TypeError):
- df.isin("a")
-
- with pytest.raises(TypeError):
- df.isin("aaa")
-
- def test_isin_df(self):
- df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
- df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
- expected = DataFrame(False, df1.index, df1.columns)
- result = df1.isin(df2)
- expected["A"].loc[[1, 3]] = True
- expected["B"].loc[[0, 2]] = True
- tm.assert_frame_equal(result, expected)
-
- # partial overlapping columns
- df2.columns = ["A", "C"]
- result = df1.isin(df2)
- expected["B"] = False
- tm.assert_frame_equal(result, expected)
-
- def test_isin_tuples(self):
- # GH 16394
- df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
- df["C"] = list(zip(df["A"], df["B"]))
- result = df["C"].isin([(1, "a")])
- tm.assert_series_equal(result, Series([True, False, False], name="C"))
-
- def test_isin_df_dupe_values(self):
- df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
- # just cols duped
- df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
- with pytest.raises(ValueError):
- df1.isin(df2)
-
- # just index duped
- df2 = DataFrame(
- [[0, 2], [12, 4], [2, np.nan], [4, 5]],
- columns=["A", "B"],
- index=[0, 0, 1, 1],
- )
- with pytest.raises(ValueError):
- df1.isin(df2)
-
- # cols and index:
- df2.columns = ["B", "B"]
- with pytest.raises(ValueError):
- df1.isin(df2)
-
- def test_isin_dupe_self(self):
- other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
- df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
- result = df.isin(other)
- expected = DataFrame(False, index=df.index, columns=df.columns)
- expected.loc[0] = True
- expected.iloc[1, 1] = True
- tm.assert_frame_equal(result, expected)
-
- def test_isin_against_series(self):
- df = pd.DataFrame(
- {"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
- )
- s = pd.Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
- expected = DataFrame(False, index=df.index, columns=df.columns)
- expected["A"].loc["a"] = True
- expected.loc["d"] = True
- result = df.isin(s)
- tm.assert_frame_equal(result, expected)
-
- def test_isin_multiIndex(self):
- idx = MultiIndex.from_tuples(
- [
- (0, "a", "foo"),
- (0, "a", "bar"),
- (0, "b", "bar"),
- (0, "b", "baz"),
- (2, "a", "foo"),
- (2, "a", "bar"),
- (2, "c", "bar"),
- (2, "c", "baz"),
- (1, "b", "foo"),
- (1, "b", "bar"),
- (1, "c", "bar"),
- (1, "c", "baz"),
- ]
- )
- df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
- df2 = DataFrame(
- {
- "A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
- "B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
- }
- )
- # against regular index
- expected = DataFrame(False, index=df1.index, columns=df1.columns)
- result = df1.isin(df2)
- tm.assert_frame_equal(result, expected)
-
- df2.index = idx
- expected = df2.values.astype(np.bool)
- expected[:, 1] = ~expected[:, 1]
- expected = DataFrame(expected, columns=["A", "B"], index=idx)
-
- result = df1.isin(df2)
- tm.assert_frame_equal(result, expected)
-
- def test_isin_empty_datetimelike(self):
- # GH 15473
- df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
- df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
- df2 = DataFrame({"date": []})
- df3 = DataFrame()
-
- expected = DataFrame({"date": [False, False]})
-
- result = df1_ts.isin(df2)
- tm.assert_frame_equal(result, expected)
- result = df1_ts.isin(df3)
- tm.assert_frame_equal(result, expected)
-
- result = df1_td.isin(df2)
- tm.assert_frame_equal(result, expected)
- result = df1_td.isin(df3)
- tm.assert_frame_equal(result, expected)
-
# ---------------------------------------------------------------------
# Rounding
@@ -2174,158 +1664,6 @@ def test_round_interval_category_columns(self):
expected = DataFrame([[1.0, 1.0], [0.0, 0.0]], columns=columns)
tm.assert_frame_equal(result, expected)
- # ---------------------------------------------------------------------
- # Clip
-
- def test_clip(self, float_frame):
- median = float_frame.median().median()
- original = float_frame.copy()
-
- double = float_frame.clip(upper=median, lower=median)
- assert not (double.values != median).any()
-
- # Verify that float_frame was not changed inplace
- assert (float_frame.values == original.values).all()
-
- def test_inplace_clip(self, float_frame):
- # GH 15388
- median = float_frame.median().median()
- frame_copy = float_frame.copy()
-
- frame_copy.clip(upper=median, lower=median, inplace=True)
- assert not (frame_copy.values != median).any()
-
- def test_dataframe_clip(self):
- # GH 2747
- df = DataFrame(np.random.randn(1000, 2))
-
- for lb, ub in [(-1, 1), (1, -1)]:
- clipped_df = df.clip(lb, ub)
-
- lb, ub = min(lb, ub), max(ub, lb)
- lb_mask = df.values <= lb
- ub_mask = df.values >= ub
- mask = ~lb_mask & ~ub_mask
- assert (clipped_df.values[lb_mask] == lb).all()
- assert (clipped_df.values[ub_mask] == ub).all()
- assert (clipped_df.values[mask] == df.values[mask]).all()
-
- def test_clip_mixed_numeric(self):
- # TODO(jreback)
- # clip on mixed integer or floats
- # with integer clippers coerces to float
- df = DataFrame({"A": [1, 2, 3], "B": [1.0, np.nan, 3.0]})
- result = df.clip(1, 2)
- expected = DataFrame({"A": [1, 2, 2], "B": [1.0, np.nan, 2.0]})
- tm.assert_frame_equal(result, expected, check_like=True)
-
- # GH 24162, clipping now preserves numeric types per column
- df = DataFrame([[1, 2, 3.4], [3, 4, 5.6]], columns=["foo", "bar", "baz"])
- expected = df.dtypes
- result = df.clip(upper=3).dtypes
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize("inplace", [True, False])
- def test_clip_against_series(self, inplace):
- # GH 6966
-
- df = DataFrame(np.random.randn(1000, 2))
- lb = Series(np.random.randn(1000))
- ub = lb + 1
-
- original = df.copy()
- clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
-
- if inplace:
- clipped_df = df
-
- for i in range(2):
- lb_mask = original.iloc[:, i] <= lb
- ub_mask = original.iloc[:, i] >= ub
- mask = ~lb_mask & ~ub_mask
-
- result = clipped_df.loc[lb_mask, i]
- tm.assert_series_equal(result, lb[lb_mask], check_names=False)
- assert result.name == i
-
- result = clipped_df.loc[ub_mask, i]
- tm.assert_series_equal(result, ub[ub_mask], check_names=False)
- assert result.name == i
-
- tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
-
- @pytest.mark.parametrize("inplace", [True, False])
- @pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
- @pytest.mark.parametrize(
- "axis,res",
- [
- (0, [[2.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 7.0, 7.0]]),
- (1, [[2.0, 3.0, 4.0], [4.0, 5.0, 6.0], [5.0, 6.0, 7.0]]),
- ],
- )
- def test_clip_against_list_like(self, simple_frame, inplace, lower, axis, res):
- # GH 15390
- original = simple_frame.copy(deep=True)
-
- result = original.clip(lower=lower, upper=[5, 6, 7], axis=axis, inplace=inplace)
-
- expected = pd.DataFrame(res, columns=original.columns, index=original.index)
- if inplace:
- result = original
- tm.assert_frame_equal(result, expected, check_exact=True)
-
- @pytest.mark.parametrize("axis", [0, 1, None])
- def test_clip_against_frame(self, axis):
- df = DataFrame(np.random.randn(1000, 2))
- lb = DataFrame(np.random.randn(1000, 2))
- ub = lb + 1
-
- clipped_df = df.clip(lb, ub, axis=axis)
-
- lb_mask = df <= lb
- ub_mask = df >= ub
- mask = ~lb_mask & ~ub_mask
-
- tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
- tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
- tm.assert_frame_equal(clipped_df[mask], df[mask])
-
- def test_clip_against_unordered_columns(self):
- # GH 20911
- df1 = DataFrame(np.random.randn(1000, 4), columns=["A", "B", "C", "D"])
- df2 = DataFrame(np.random.randn(1000, 4), columns=["D", "A", "B", "C"])
- df3 = DataFrame(df2.values - 1, columns=["B", "D", "C", "A"])
- result_upper = df1.clip(lower=0, upper=df2)
- expected_upper = df1.clip(lower=0, upper=df2[df1.columns])
- result_lower = df1.clip(lower=df3, upper=3)
- expected_lower = df1.clip(lower=df3[df1.columns], upper=3)
- result_lower_upper = df1.clip(lower=df3, upper=df2)
- expected_lower_upper = df1.clip(lower=df3[df1.columns], upper=df2[df1.columns])
- tm.assert_frame_equal(result_upper, expected_upper)
- tm.assert_frame_equal(result_lower, expected_lower)
- tm.assert_frame_equal(result_lower_upper, expected_lower_upper)
-
- def test_clip_with_na_args(self, float_frame):
- """Should process np.nan argument as None """
- # GH 17276
- tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
- tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame)
-
- # GH 19992
- df = DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]})
-
- result = df.clip(lower=[4, 5, np.nan], axis=0)
- expected = DataFrame(
- {"col_0": [4, 5, np.nan], "col_1": [4, 5, np.nan], "col_2": [7, 8, np.nan]}
- )
- tm.assert_frame_equal(result, expected)
-
- result = df.clip(lower=[4, 5, np.nan], axis=1)
- expected = DataFrame(
- {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [np.nan, np.nan, np.nan]}
- )
- tm.assert_frame_equal(result, expected)
-
# ---------------------------------------------------------------------
# Matrix-like
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index f3e61dffb500d..a4f1c0688b144 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -888,44 +888,3 @@ def test_no_warning(self, all_arithmetic_operators):
b = df["B"]
with tm.assert_produces_warning(None):
getattr(df, all_arithmetic_operators)(b, 0)
-
-
-class TestTranspose:
- def test_transpose_tzaware_1col_single_tz(self):
- # GH#26825
- dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
-
- df = pd.DataFrame(dti)
- assert (df.dtypes == dti.dtype).all()
- res = df.T
- assert (res.dtypes == dti.dtype).all()
-
- def test_transpose_tzaware_2col_single_tz(self):
- # GH#26825
- dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
-
- df3 = pd.DataFrame({"A": dti, "B": dti})
- assert (df3.dtypes == dti.dtype).all()
- res3 = df3.T
- assert (res3.dtypes == dti.dtype).all()
-
- def test_transpose_tzaware_2col_mixed_tz(self):
- # GH#26825
- dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
- dti2 = dti.tz_convert("US/Pacific")
-
- df4 = pd.DataFrame({"A": dti, "B": dti2})
- assert (df4.dtypes == [dti.dtype, dti2.dtype]).all()
- assert (df4.T.dtypes == object).all()
- tm.assert_frame_equal(df4.T.T, df4)
-
- def test_transpose_object_to_tzaware_mixed_tz(self):
- # GH#26825
- dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
- dti2 = dti.tz_convert("US/Pacific")
-
- # mixed all-tzaware dtypes
- df2 = pd.DataFrame([dti, dti2])
- assert (df2.dtypes == object).all()
- res2 = df2.T
- assert (res2.dtypes == [dti.dtype, dti2.dtype]).all()
diff --git a/pandas/tests/series/methods/test_clip.py b/pandas/tests/series/methods/test_clip.py
new file mode 100644
index 0000000000000..c2bec2744583a
--- /dev/null
+++ b/pandas/tests/series/methods/test_clip.py
@@ -0,0 +1,99 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import Series, Timestamp, isna, notna
+import pandas.util.testing as tm
+
+
+class TestSeriesClip:
+ def test_clip(self, datetime_series):
+ val = datetime_series.median()
+
+ assert datetime_series.clip(lower=val).min() == val
+ assert datetime_series.clip(upper=val).max() == val
+
+ result = datetime_series.clip(-0.5, 0.5)
+ expected = np.clip(datetime_series, -0.5, 0.5)
+ tm.assert_series_equal(result, expected)
+ assert isinstance(expected, Series)
+
+ def test_clip_types_and_nulls(self):
+
+ sers = [
+ Series([np.nan, 1.0, 2.0, 3.0]),
+ Series([None, "a", "b", "c"]),
+ Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
+ ]
+
+ for s in sers:
+ thresh = s[2]
+ lower = s.clip(lower=thresh)
+ upper = s.clip(upper=thresh)
+ assert lower[notna(lower)].min() == thresh
+ assert upper[notna(upper)].max() == thresh
+ assert list(isna(s)) == list(isna(lower))
+ assert list(isna(s)) == list(isna(upper))
+
+ def test_clip_with_na_args(self):
+ """Should process np.nan argument as None """
+ # GH#17276
+ s = Series([1, 2, 3])
+
+ tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
+ tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
+
+ # GH#19992
+ tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan]))
+ tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1]))
+
+ def test_clip_against_series(self):
+ # GH#6966
+
+ s = Series([1.0, 1.0, 4.0])
+
+ lower = Series([1.0, 2.0, 3.0])
+ upper = Series([1.5, 2.5, 3.5])
+
+ tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
+ tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
+
+ @pytest.mark.parametrize("inplace", [True, False])
+ @pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
+ def test_clip_against_list_like(self, inplace, upper):
+ # GH#15390
+ original = pd.Series([5, 6, 7])
+ result = original.clip(upper=upper, inplace=inplace)
+ expected = pd.Series([1, 2, 3])
+
+ if inplace:
+ result = original
+ tm.assert_series_equal(result, expected, check_exact=True)
+
+ def test_clip_with_datetimes(self):
+ # GH#11838
+ # naive and tz-aware datetimes
+
+ t = Timestamp("2015-12-01 09:30:30")
+ s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
+ result = s.clip(upper=t)
+ expected = Series(
+ [Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
+ )
+ tm.assert_series_equal(result, expected)
+
+ t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
+ s = Series(
+ [
+ Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
+ Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
+ ]
+ )
+ result = s.clip(upper=t)
+ expected = Series(
+ [
+ Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
+ Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
+ ]
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py
new file mode 100644
index 0000000000000..ed412e7da3d43
--- /dev/null
+++ b/pandas/tests/series/methods/test_describe.py
@@ -0,0 +1,69 @@
+import numpy as np
+
+from pandas import Series, Timestamp, date_range
+import pandas.util.testing as tm
+
+
+class TestSeriesDescribe:
+ def test_describe(self):
+ s = Series([0, 1, 2, 3, 4], name="int_data")
+ result = s.describe()
+ expected = Series(
+ [5, 2, s.std(), 0, 1, 2, 3, 4],
+ name="int_data",
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ )
+ tm.assert_series_equal(result, expected)
+
+ s = Series([True, True, False, False, False], name="bool_data")
+ result = s.describe()
+ expected = Series(
+ [5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
+ )
+ tm.assert_series_equal(result, expected)
+
+ s = Series(["a", "a", "b", "c", "d"], name="str_data")
+ result = s.describe()
+ expected = Series(
+ [5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
+ )
+ tm.assert_series_equal(result, expected)
+
+ def test_describe_empty_object(self):
+ # https://github.com/pandas-dev/pandas/issues/27183
+ s = Series([None, None], dtype=object)
+ result = s.describe()
+ expected = Series(
+ [0, 0, np.nan, np.nan],
+ dtype=object,
+ index=["count", "unique", "top", "freq"],
+ )
+ tm.assert_series_equal(result, expected)
+
+ result = s[:0].describe()
+ tm.assert_series_equal(result, expected)
+ # ensure NaN, not None
+ assert np.isnan(result.iloc[2])
+ assert np.isnan(result.iloc[3])
+
+ def test_describe_with_tz(self, tz_naive_fixture):
+ # GH 21332
+ tz = tz_naive_fixture
+ name = str(tz_naive_fixture)
+ start = Timestamp(2018, 1, 1)
+ end = Timestamp(2018, 1, 5)
+ s = Series(date_range(start, end, tz=tz), name=name)
+ result = s.describe()
+ expected = Series(
+ [
+ 5,
+ 5,
+ s.value_counts().index[0],
+ 1,
+ start.tz_localize(tz),
+ end.tz_localize(tz),
+ ],
+ name=name,
+ index=["count", "unique", "top", "freq", "first", "last"],
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
new file mode 100644
index 0000000000000..69b2f896aec52
--- /dev/null
+++ b/pandas/tests/series/methods/test_isin.py
@@ -0,0 +1,82 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import Series, date_range
+import pandas.util.testing as tm
+
+
+class TestSeriesIsIn:
+ def test_isin(self):
+ s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
+
+ result = s.isin(["A", "C"])
+ expected = Series([True, False, True, False, False, False, True, True])
+ tm.assert_series_equal(result, expected)
+
+ # GH#16012
+ # This specific issue has to have a series over 1e6 in len, but the
+ # comparison array (in_list) must be large enough so that numpy doesn't
+ # do a manual masking trick that will avoid this issue altogether
+ s = Series(list("abcdefghijk" * 10 ** 5))
+ # If numpy doesn't do the manual comparison/mask, these
+ # unorderable mixed types are what cause the exception in numpy
+ in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6
+
+ assert s.isin(in_list).sum() == 200000
+
+ def test_isin_with_string_scalar(self):
+ # GH#4763
+ s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
+ msg = (
+ r"only list-like objects are allowed to be passed to isin\(\),"
+ r" you passed a \[str\]"
+ )
+ with pytest.raises(TypeError, match=msg):
+ s.isin("a")
+
+ s = Series(["aaa", "b", "c"])
+ with pytest.raises(TypeError, match=msg):
+ s.isin("aaa")
+
+ def test_isin_with_i8(self):
+ # GH#5021
+
+ expected = Series([True, True, False, False, False])
+ expected2 = Series([False, True, False, False, False])
+
+ # datetime64[ns]
+ s = Series(date_range("jan-01-2013", "jan-05-2013"))
+
+ result = s.isin(s[0:2])
+ tm.assert_series_equal(result, expected)
+
+ result = s.isin(s[0:2].values)
+ tm.assert_series_equal(result, expected)
+
+ # fails on dtype conversion in the first place
+ result = s.isin(s[0:2].values.astype("datetime64[D]"))
+ tm.assert_series_equal(result, expected)
+
+ result = s.isin([s[1]])
+ tm.assert_series_equal(result, expected2)
+
+ result = s.isin([np.datetime64(s[1])])
+ tm.assert_series_equal(result, expected2)
+
+ result = s.isin(set(s[0:2]))
+ tm.assert_series_equal(result, expected)
+
+ # timedelta64[ns]
+ s = Series(pd.to_timedelta(range(5), unit="d"))
+ result = s.isin(s[0:2])
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
+ def test_isin_empty(self, empty):
+ # see GH#16991
+ s = Series(["a", "b"])
+ expected = Series([False, False])
+
+ result = s.isin(empty)
+ tm.assert_series_equal(expected, result)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 6b85714d06594..86931ae23caee 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -6,76 +6,13 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import Categorical, DataFrame, MultiIndex, Series, date_range, isna, notna
+from pandas import Categorical, DataFrame, MultiIndex, Series, date_range, isna
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.util.testing as tm
class TestSeriesAnalytics:
- def test_describe(self):
- s = Series([0, 1, 2, 3, 4], name="int_data")
- result = s.describe()
- expected = Series(
- [5, 2, s.std(), 0, 1, 2, 3, 4],
- name="int_data",
- index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
- )
- tm.assert_series_equal(result, expected)
-
- s = Series([True, True, False, False, False], name="bool_data")
- result = s.describe()
- expected = Series(
- [5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
- )
- tm.assert_series_equal(result, expected)
-
- s = Series(["a", "a", "b", "c", "d"], name="str_data")
- result = s.describe()
- expected = Series(
- [5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
- )
- tm.assert_series_equal(result, expected)
-
- def test_describe_empty_object(self):
- # https://github.com/pandas-dev/pandas/issues/27183
- s = pd.Series([None, None], dtype=object)
- result = s.describe()
- expected = pd.Series(
- [0, 0, np.nan, np.nan],
- dtype=object,
- index=["count", "unique", "top", "freq"],
- )
- tm.assert_series_equal(result, expected)
-
- result = s[:0].describe()
- tm.assert_series_equal(result, expected)
- # ensure NaN, not None
- assert np.isnan(result.iloc[2])
- assert np.isnan(result.iloc[3])
-
- def test_describe_with_tz(self, tz_naive_fixture):
- # GH 21332
- tz = tz_naive_fixture
- name = str(tz_naive_fixture)
- start = Timestamp(2018, 1, 1)
- end = Timestamp(2018, 1, 5)
- s = Series(date_range(start, end, tz=tz), name=name)
- result = s.describe()
- expected = Series(
- [
- 5,
- 5,
- s.value_counts().index[0],
- 1,
- start.tz_localize(tz),
- end.tz_localize(tz),
- ],
- name=name,
- index=["count", "unique", "top", "freq", "first", "last"],
- )
- tm.assert_series_equal(result, expected)
-
def test_argsort(self, datetime_series):
self._check_accum_op("argsort", datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
@@ -534,172 +471,6 @@ def test_matmul(self):
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
- def test_clip(self, datetime_series):
- val = datetime_series.median()
-
- assert datetime_series.clip(lower=val).min() == val
- assert datetime_series.clip(upper=val).max() == val
-
- result = datetime_series.clip(-0.5, 0.5)
- expected = np.clip(datetime_series, -0.5, 0.5)
- tm.assert_series_equal(result, expected)
- assert isinstance(expected, Series)
-
- def test_clip_types_and_nulls(self):
-
- sers = [
- Series([np.nan, 1.0, 2.0, 3.0]),
- Series([None, "a", "b", "c"]),
- Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
- ]
-
- for s in sers:
- thresh = s[2]
- lower = s.clip(lower=thresh)
- upper = s.clip(upper=thresh)
- assert lower[notna(lower)].min() == thresh
- assert upper[notna(upper)].max() == thresh
- assert list(isna(s)) == list(isna(lower))
- assert list(isna(s)) == list(isna(upper))
-
- def test_clip_with_na_args(self):
- """Should process np.nan argument as None """
- # GH # 17276
- s = Series([1, 2, 3])
-
- tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
- tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
-
- # GH #19992
- tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan]))
- tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1]))
-
- def test_clip_against_series(self):
- # GH #6966
-
- s = Series([1.0, 1.0, 4.0])
-
- lower = Series([1.0, 2.0, 3.0])
- upper = Series([1.5, 2.5, 3.5])
-
- tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
- tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
-
- @pytest.mark.parametrize("inplace", [True, False])
- @pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
- def test_clip_against_list_like(self, inplace, upper):
- # GH #15390
- original = pd.Series([5, 6, 7])
- result = original.clip(upper=upper, inplace=inplace)
- expected = pd.Series([1, 2, 3])
-
- if inplace:
- result = original
- tm.assert_series_equal(result, expected, check_exact=True)
-
- def test_clip_with_datetimes(self):
-
- # GH 11838
- # naive and tz-aware datetimes
-
- t = Timestamp("2015-12-01 09:30:30")
- s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
- result = s.clip(upper=t)
- expected = Series(
- [Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
- )
- tm.assert_series_equal(result, expected)
-
- t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
- s = Series(
- [
- Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
- Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
- ]
- )
- result = s.clip(upper=t)
- expected = Series(
- [
- Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
- Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
- ]
- )
- tm.assert_series_equal(result, expected)
-
- def test_isin(self):
- s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
-
- result = s.isin(["A", "C"])
- expected = Series([True, False, True, False, False, False, True, True])
- tm.assert_series_equal(result, expected)
-
- # GH: 16012
- # This specific issue has to have a series over 1e6 in len, but the
- # comparison array (in_list) must be large enough so that numpy doesn't
- # do a manual masking trick that will avoid this issue altogether
- s = Series(list("abcdefghijk" * 10 ** 5))
- # If numpy doesn't do the manual comparison/mask, these
- # unorderable mixed types are what cause the exception in numpy
- in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6
-
- assert s.isin(in_list).sum() == 200000
-
- def test_isin_with_string_scalar(self):
- # GH4763
- s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
- msg = (
- r"only list-like objects are allowed to be passed to isin\(\),"
- r" you passed a \[str\]"
- )
- with pytest.raises(TypeError, match=msg):
- s.isin("a")
-
- s = Series(["aaa", "b", "c"])
- with pytest.raises(TypeError, match=msg):
- s.isin("aaa")
-
- def test_isin_with_i8(self):
- # GH 5021
-
- expected = Series([True, True, False, False, False])
- expected2 = Series([False, True, False, False, False])
-
- # datetime64[ns]
- s = Series(date_range("jan-01-2013", "jan-05-2013"))
-
- result = s.isin(s[0:2])
- tm.assert_series_equal(result, expected)
-
- result = s.isin(s[0:2].values)
- tm.assert_series_equal(result, expected)
-
- # fails on dtype conversion in the first place
- result = s.isin(s[0:2].values.astype("datetime64[D]"))
- tm.assert_series_equal(result, expected)
-
- result = s.isin([s[1]])
- tm.assert_series_equal(result, expected2)
-
- result = s.isin([np.datetime64(s[1])])
- tm.assert_series_equal(result, expected2)
-
- result = s.isin(set(s[0:2]))
- tm.assert_series_equal(result, expected)
-
- # timedelta64[ns]
- s = Series(pd.to_timedelta(range(5), unit="d"))
- result = s.isin(s[0:2])
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
- def test_isin_empty(self, empty):
- # see gh-16991
- s = Series(["a", "b"])
- expected = Series([False, False])
-
- result = s.isin(empty)
- tm.assert_series_equal(expected, result)
-
def test_ptp(self):
# GH21614
N = 1000
| https://api.github.com/repos/pandas-dev/pandas/pulls/30381 | 2019-12-20T18:27:03Z | 2019-12-23T17:03:28Z | 2019-12-23T17:03:28Z | 2019-12-23T17:05:38Z | |
CI: troubleshoot codecov | diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 4c5dbabc81950..0b68164e5767e 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -37,8 +37,7 @@ echo $PYTEST_CMD
sh -c "$PYTEST_CMD"
if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then
- SHA=`git rev-parse HEAD`
echo "uploading coverage"
- echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C $SHA"
- bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C `git rev-parse HEAD`
+ echo "bash <(curl -s https://codecov.io/bash) -Z -c -f $COVERAGE_FNAME"
+ bash <(curl -s https://codecov.io/bash) -Z -c -f $COVERAGE_FNAME
fi
| xref discussion in #30070. | https://api.github.com/repos/pandas-dev/pandas/pulls/30380 | 2019-12-20T17:35:56Z | 2019-12-21T02:16:29Z | 2019-12-21T02:16:29Z | 2019-12-21T02:17:03Z |
BUG: Series.drop() with MultiIndex: inconsistent behaviour | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 716784f798a54..4f5caf8b70d9f 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -789,7 +789,7 @@ MultiIndex
^^^^^^^^^^
- Constructior for :class:`MultiIndex` verifies that the given ``sortorder`` is compatible with the actual ``lexsort_depth`` if ``verify_integrity`` parameter is ``True`` (the default) (:issue:`28735`)
--
+- Series and MultiIndex `.drop` with `MultiIndex` raise exception if labels not in given in level (:issue:`8594`)
-
I/O
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 05a4da28eb0a1..dac9b20104c36 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2065,7 +2065,7 @@ def drop(self, codes, level=None, errors="raise"):
dropped : MultiIndex
"""
if level is not None:
- return self._drop_from_level(codes, level)
+ return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
@@ -2103,13 +2103,15 @@ def drop(self, codes, level=None, errors="raise"):
return self.delete(inds)
- def _drop_from_level(self, codes, level):
+ def _drop_from_level(self, codes, level, errors="raise"):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
+ if mask.all() and errors != "ignore":
+ raise KeyError(f"labels {codes} not found in level")
return self[mask]
diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py
index ee60f4537ade3..364420a292ed5 100644
--- a/pandas/tests/indexes/multi/test_drop.py
+++ b/pandas/tests/indexes/multi/test_drop.py
@@ -141,6 +141,39 @@ def test_drop_not_lexsorted():
tm.assert_index_equal(lexsorted_mi.drop("a"), not_lexsorted_mi.drop("a"))
+@pytest.mark.parametrize(
+ "msg,labels,level",
+ [
+ (r"labels \[4\] not found in level", 4, "a"),
+ (r"labels \[7\] not found in level", 7, "b"),
+ ],
+)
+def test_drop_raise_exception_if_labels_not_in_level(msg, labels, level):
+ # GH 8594
+ mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
+ s = pd.Series([10, 20, 30], index=mi)
+ df = pd.DataFrame([10, 20, 30], index=mi)
+
+ with pytest.raises(KeyError, match=msg):
+ s.drop(labels, level=level)
+ with pytest.raises(KeyError, match=msg):
+ df.drop(labels, level=level)
+
+
+@pytest.mark.parametrize("labels,level", [(4, "a"), (7, "b")])
+def test_drop_errors_ignore(labels, level):
+ # GH 8594
+ mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
+ s = pd.Series([10, 20, 30], index=mi)
+ df = pd.DataFrame([10, 20, 30], index=mi)
+
+ expected_s = s.drop(labels, level=level, errors="ignore")
+ tm.assert_series_equal(s, expected_s)
+
+ expected_df = df.drop(labels, level=level, errors="ignore")
+ tm.assert_frame_equal(df, expected_df)
+
+
def test_drop_with_non_unique_datetime_index_and_invalid_keys():
# GH 30399
| - [x] closes #8594
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
description:
In core.base
```
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{labels[mask]} not found in axis")
```
single index check and raise if there is no matching indexer. But 'MultiIndex' case,
In indexes.multi
```
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
return self[mask]
```
no check. So i add check logic and raise exception if there is no matching indexer.
This bug cause from 'MultiIndex', so do not distinguish between "Series" and "DataFrame"
| https://api.github.com/repos/pandas-dev/pandas/pulls/30377 | 2019-12-20T14:02:17Z | 2019-12-26T13:35:10Z | 2019-12-26T13:35:09Z | 2019-12-26T13:40:53Z |
[CLN] remove now-unnecessary td.skip_if_no(pathlib) | diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index d9a76fe97f813..3cd9d9cdd67d2 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -3,6 +3,7 @@
from distutils.version import LooseVersion
from io import BytesIO
import os
+from pathlib import Path
import re
from warnings import catch_warnings, simplefilter
@@ -4594,12 +4595,9 @@ def test_read_nokey_empty(self, setup_path):
with pytest.raises(ValueError):
read_hdf(path)
- @td.skip_if_no("pathlib")
def test_read_from_pathlib_path(self, setup_path):
# GH11773
- from pathlib import Path
-
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index e37561c865c7a..49af18d2935ef 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -1,5 +1,6 @@
import io
import os
+from pathlib import Path
import numpy as np
import pytest
@@ -68,10 +69,7 @@ def test_from_iterator(self):
tm.assert_frame_equal(df, df0.iloc[2:5, :])
rdr.close()
- @td.skip_if_no("pathlib")
def test_path_pathlib(self):
- from pathlib import Path
-
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index a15eac89ecedb..d2633ea0676cd 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -4,6 +4,7 @@
from io import StringIO
import mmap
import os
+from pathlib import Path
import pytest
@@ -27,14 +28,7 @@ def __fspath__(self):
# Functions that consume a string path and return a string or path-like object
-path_types = [str, CustomFSPath]
-
-try:
- from pathlib import Path
-
- path_types.append(Path)
-except ImportError:
- pass
+path_types = [str, CustomFSPath, Path]
try:
from py.path import local as LocalPath
@@ -73,7 +67,6 @@ def test_expand_user_normal_path(self):
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
- @td.skip_if_no("pathlib")
def test_stringify_path_pathlib(self):
rel_path = icom._stringify_path(Path("."))
assert rel_path == "."
| - [ ] closes #30375
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30376 | 2019-12-20T12:54:07Z | 2019-12-20T14:59:06Z | 2019-12-20T14:59:06Z | 2019-12-20T16:01:53Z |
REF: define NA_VALUES in libparsers | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index bb1493280dfd2..1b566af7a5437 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1367,7 +1367,26 @@ def _ensure_encoded(list lst):
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
-_NA_VALUES = _ensure_encoded(list(icom._NA_VALUES))
+STR_NA_VALUES = {
+ "-1.#IND",
+ "1.#QNAN",
+ "1.#IND",
+ "-1.#QNAN",
+ "#N/A N/A",
+ "#N/A",
+ "N/A",
+ "n/a",
+ "NA",
+ "#NA",
+ "NULL",
+ "null",
+ "NaN",
+ "-NaN",
+ "nan",
+ "-nan",
+ "",
+}
+_NA_VALUES = _ensure_encoded(list(STR_NA_VALUES))
def _maybe_upcast(arr):
diff --git a/pandas/io/common.py b/pandas/io/common.py
index a01011cd7d4e4..0159716248b11 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -47,29 +47,6 @@
lzma = _import_lzma()
-# common NA values
-# no longer excluding inf representations
-# '1.#INF','-1.#INF', '1.#INF000000',
-_NA_VALUES = {
- "-1.#IND",
- "1.#QNAN",
- "1.#IND",
- "-1.#QNAN",
- "#N/A N/A",
- "#N/A",
- "N/A",
- "n/a",
- "NA",
- "#NA",
- "NULL",
- "null",
- "NaN",
- "-NaN",
- "nan",
- "-nan",
- "",
-}
-
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 81d3d46f78bdb..8368142c3633a 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -6,6 +6,7 @@
from pandas._config import config
+from pandas._libs.parsers import STR_NA_VALUES
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender
@@ -14,7 +15,6 @@
from pandas.core.frame import DataFrame
from pandas.io.common import (
- _NA_VALUES,
_is_url,
_stringify_path,
_validate_header_arg,
@@ -124,7 +124,7 @@
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '"""
- + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ")
+ + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a887a537a2201..32d812637a067 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -17,6 +17,7 @@
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
+from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas.errors import (
AbstractMethodError,
@@ -60,7 +61,6 @@
from pandas.core.tools import datetimes as tools
from pandas.io.common import (
- _NA_VALUES,
BaseIterator,
UnicodeReader,
UTF8Recoder,
@@ -195,7 +195,7 @@
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
- + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ")
+ + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
@@ -3398,7 +3398,7 @@ def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
- na_values = _NA_VALUES
+ na_values = STR_NA_VALUES
else:
na_values = set()
na_fvalues = set()
@@ -3415,7 +3415,7 @@ def _clean_na_values(na_values, keep_default_na=True):
v = [v]
if keep_default_na:
- v = set(v) | _NA_VALUES
+ v = set(v) | STR_NA_VALUES
na_values[k] = v
na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
@@ -3424,7 +3424,7 @@ def _clean_na_values(na_values, keep_default_na=True):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
- na_values = na_values | _NA_VALUES
+ na_values = na_values | STR_NA_VALUES
na_fvalues = _floatify_na_values(na_values)
@@ -3575,7 +3575,7 @@ def _get_na_values(col, na_values, na_fvalues, keep_default_na):
return na_values[col], na_fvalues[col]
else:
if keep_default_na:
- return _NA_VALUES, set()
+ return STR_NA_VALUES, set()
return set(), set()
else:
diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py
index f52c6b8858fd3..353d309a84823 100644
--- a/pandas/tests/io/parser/test_na_values.py
+++ b/pandas/tests/io/parser/test_na_values.py
@@ -7,11 +7,11 @@
import numpy as np
import pytest
+from pandas._libs.parsers import STR_NA_VALUES
+
from pandas import DataFrame, Index, MultiIndex
import pandas.util.testing as tm
-import pandas.io.common as com
-
def test_string_nas(all_parsers):
parser = all_parsers
@@ -99,7 +99,7 @@ def test_default_na_values(all_parsers):
"#N/A N/A",
"",
}
- assert _NA_VALUES == com._NA_VALUES
+ assert _NA_VALUES == STR_NA_VALUES
parser = all_parsers
nv = len(_NA_VALUES)
| The usual goal: make cython code depend on non-cython code less
The new goal: limit the places where we import from `pd.io.common` so I can track down the many encoding/path/compression... inconsistencies. | https://api.github.com/repos/pandas-dev/pandas/pulls/30373 | 2019-12-20T05:01:35Z | 2019-12-20T12:27:14Z | 2019-12-20T12:27:14Z | 2019-12-20T16:23:03Z |
REF: remove last major pytables state-altering | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d14b4ecf070a7..4d60bc0b45c70 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3145,15 +3145,25 @@ class Table(Fixed):
info: Dict
def __init__(
- self, parent: HDFStore, group: "Node", encoding=None, errors: str = "strict"
+ self,
+ parent: HDFStore,
+ group: "Node",
+ encoding=None,
+ errors: str = "strict",
+ index_axes=None,
+ non_index_axes=None,
+ values_axes=None,
+ data_columns=None,
+ info=None,
+ nan_rep=None,
):
super().__init__(parent, group, encoding=encoding, errors=errors)
- self.index_axes = []
- self.non_index_axes = []
- self.values_axes = []
- self.data_columns = []
- self.info = dict()
- self.nan_rep = None
+ self.index_axes = index_axes or []
+ self.non_index_axes = non_index_axes or []
+ self.values_axes = values_axes or []
+ self.data_columns = data_columns or []
+ self.info = info or dict()
+ self.nan_rep = nan_rep
@property
def table_type_short(self) -> str:
@@ -3635,23 +3645,28 @@ def _create_axes(
data_columns=None,
min_itemsize=None,
):
- """ create and return the axes
- legacy tables create an indexable column, indexable index,
- non-indexable fields
-
- Parameters
- ----------
- axes: a list of the axes in order to create (names or numbers of
- the axes)
- obj : the object to create axes on
- validate: validate the obj against an existing object already
- written
- min_itemsize: a dict of the min size for a column in bytes
- nan_rep : a values to use for string column nan_rep
- encoding : the encoding for string values
- data_columns : a list of columns that we want to create separate to
- allow indexing (or True will force all columns)
+ """
+ Create and return the axes.
+
+ Parameters
+ ----------
+ axes: list or None
+ The names or numbers of the axes to create.
+ obj : DataFrame
+ The object to create axes on.
+ validate: bool, default True
+ Whether to validate the obj against an existing object already written.
+ nan_rep :
+ A value to use for string column nan_rep.
+ data_columns : List[str], True, or None, default None
+ Specify the columns that we want to create to allow indexing on.
+ * True : Use all available columns.
+ * None : Use no columns.
+ * List[str] : Use the specified columns.
+
+ min_itemsize: Dict[str, int] or None, default None
+ The min itemsize for a column in bytes.
"""
if not isinstance(obj, DataFrame):
@@ -3670,15 +3685,15 @@ def _create_axes(
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
- existing_table = self.copy()
+ table_exists = True
axes = [a.axis for a in self.index_axes]
- data_columns = self.data_columns
+ data_columns = list(self.data_columns)
nan_rep = self.nan_rep
- new_info = self.info
# TODO: do we always have validate=True here?
else:
- existing_table = None
- new_info = self.info
+ table_exists = False
+
+ new_info = self.info
assert self.ndim == 2 # with next check, we must have len(axes) == 1
# currently support on ndim-1 axes
@@ -3700,9 +3715,9 @@ def _create_axes(
a = obj.axes[idx]
# we might be able to change the axes on the appending data if necessary
append_axis = list(a)
- if existing_table is not None:
+ if table_exists:
indexer = len(new_non_index_axes) # i.e. 0
- exist_axis = existing_table.non_index_axes[indexer][1]
+ exist_axis = self.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis), np.array(exist_axis)):
# ahah! -> reindex
@@ -3721,8 +3736,8 @@ def _create_axes(
# Now we can construct our new index axis
idx = axes[0]
a = obj.axes[idx]
- index_name = obj._AXIS_NAMES[idx]
- new_index = _convert_index(index_name, a, self.encoding, self.errors)
+ axis_name = obj._AXIS_NAMES[idx]
+ new_index = _convert_index(axis_name, a, self.encoding, self.errors)
new_index.axis = idx
# Because we are always 2D, there is only one new_index, so
@@ -3749,9 +3764,11 @@ def get_blk_items(mgr, blocks):
data_columns = self.validate_data_columns(
data_columns, min_itemsize, new_non_index_axes
)
+
block_obj = self.get_object(obj, transposed)._consolidate()
+
blocks, blk_items = self._get_blocks_and_items(
- block_obj, existing_table, new_non_index_axes, data_columns
+ block_obj, table_exists, new_non_index_axes, self.values_axes, data_columns
)
# add my values
@@ -3772,13 +3789,15 @@ def get_blk_items(mgr, blocks):
# make sure that we match up the existing columns
# if we have an existing table
- if existing_table is not None and validate:
+ existing_col: Optional[DataCol]
+
+ if table_exists and validate:
try:
- existing_col = existing_table.values_axes[i]
+ existing_col = self.values_axes[i]
except (IndexError, KeyError):
raise ValueError(
f"Incompatible appended table [{blocks}]"
- f"with existing table [{existing_table.values_axes}]"
+ f"with existing table [{self.values_axes}]"
)
else:
existing_col = None
@@ -3827,22 +3846,34 @@ def get_blk_items(mgr, blocks):
j += 1
- self.nan_rep = nan_rep
- self.data_columns = [col.name for col in vaxes if col.is_data_indexable]
- self.values_axes = vaxes
- self.index_axes = new_index_axes
- self.non_index_axes = new_non_index_axes
+ dcs = [col.name for col in vaxes if col.is_data_indexable]
- # validate our min_itemsize
- self.validate_min_itemsize(min_itemsize)
+ new_table = type(self)(
+ parent=self.parent,
+ group=self.group,
+ encoding=self.encoding,
+ errors=self.errors,
+ index_axes=new_index_axes,
+ non_index_axes=new_non_index_axes,
+ values_axes=vaxes,
+ data_columns=dcs,
+ info=new_info,
+ nan_rep=nan_rep,
+ )
+ if hasattr(self, "levels"):
+ # TODO: get this into constructor, only for appropriate subclass
+ new_table.levels = self.levels
+
+ new_table.validate_min_itemsize(min_itemsize)
+
+ if validate and table_exists:
+ new_table.validate(self)
- # validate the axes if we have an existing table
- if validate:
- self.validate(existing_table)
+ return new_table
@staticmethod
def _get_blocks_and_items(
- block_obj, existing_table, new_non_index_axes, data_columns
+ block_obj, table_exists, new_non_index_axes, values_axes, data_columns
):
# Helper to clarify non-state-altering parts of _create_axes
@@ -3864,15 +3895,15 @@ def get_blk_items(mgr, blocks):
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
- # reorder the blocks in the same order as the existing_table if we can
- if existing_table is not None:
+ # reorder the blocks in the same order as the existing table if we can
+ if table_exists:
by_items = {
tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)
}
new_blocks = []
new_blk_items = []
- for ea in existing_table.values_axes:
+ for ea in values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
@@ -4103,7 +4134,7 @@ def write(
self._handle.remove_node(self.group, "table")
# create the axes
- self._create_axes(
+ table = self._create_axes(
axes=axes,
obj=obj,
validate=append,
@@ -4112,13 +4143,13 @@ def write(
data_columns=data_columns,
)
- for a in self.axes:
+ for a in table.axes:
a.validate_names()
- if not self.is_exists:
+ if not table.is_exists:
# create the table
- options = self.create_description(
+ options = table.create_description(
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
@@ -4126,20 +4157,20 @@ def write(
)
# set the table attributes
- self.set_attrs()
+ table.set_attrs()
# create the table
- self._handle.create_table(self.group, **options)
+ table._handle.create_table(table.group, **options)
# update my info
- self.attrs.info = self.info
+ table.attrs.info = table.info
# validate the axes and set the kinds
- for a in self.axes:
- a.validate_and_set(self, append)
+ for a in table.axes:
+ a.validate_and_set(table, append)
# add the rows
- self.write_data(chunksize, dropna=dropna)
+ table.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize: Optional[int], dropna: bool = False):
""" we form the data into a 2-d including indexes,values,mask
| Mentioned in #30344. | https://api.github.com/repos/pandas-dev/pandas/pulls/30372 | 2019-12-20T03:09:03Z | 2019-12-26T01:07:06Z | 2019-12-26T01:07:06Z | 2019-12-26T01:11:52Z |
CLN: remove py2-legacy UnicodeReader, UnicodeWriter | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 7151a34cd37de..c62de76286610 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -2,7 +2,6 @@
import bz2
import codecs
-import csv
import gzip
from io import BufferedIOBase, BytesIO
import mmap
@@ -17,9 +16,7 @@
List,
Mapping,
Optional,
- TextIO,
Tuple,
- Type,
Union,
)
from urllib.parse import ( # noqa
@@ -574,16 +571,3 @@ def next(self) -> bytes:
def close(self):
self.reader.close()
-
-
-# Keeping these class for now because it provides a necessary convenience
-# for "dropping" the "encoding" argument from our I/O arguments when
-# creating a Unicode I/O object.
-def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
- return csv.reader(f, dialect=dialect, **kwds)
-
-
-def UnicodeWriter(
- f: TextIO, dialect: Type[csv.Dialect] = csv.excel, encoding: str = "utf-8", **kwds
-):
- return csv.writer(f, dialect=dialect, **kwds)
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index c0071028a8ef4..3a91d65ab4562 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -5,7 +5,7 @@
import csv as csvlib
from io import StringIO
import os
-from typing import Any, Dict, List
+from typing import List
import warnings
from zipfile import ZipFile
@@ -22,7 +22,6 @@
from pandas.core.dtypes.missing import notna
from pandas.io.common import (
- UnicodeWriter,
get_compression_method,
get_filepath_or_buffer,
get_handle,
@@ -188,7 +187,9 @@ def save(self):
close = True
try:
- writer_kwargs: Dict[str, Any] = dict(
+ # Note: self.encoding is irrelevant here
+ self.writer = csvlib.writer(
+ f,
lineterminator=self.line_terminator,
delimiter=self.sep,
quoting=self.quoting,
@@ -196,10 +197,6 @@ def save(self):
escapechar=self.escapechar,
quotechar=self.quotechar,
)
- if self.encoding == "ascii":
- self.writer = csvlib.writer(f, **writer_kwargs)
- else:
- self.writer = UnicodeWriter(f, encoding=self.encoding, **writer_kwargs)
self._save()
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index cc3d2bd12ca35..37cd36a2be3bc 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -63,7 +63,6 @@
from pandas.io.common import (
BaseIterator,
- UnicodeReader,
UTF8Recoder,
get_filepath_or_buffer,
get_handle,
@@ -2431,23 +2430,13 @@ class MyDialect(csv.Dialect):
self.line_pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
- if self.encoding is not None:
- self.buf.extend(
- list(
- UnicodeReader(
- StringIO(line), dialect=dia, encoding=self.encoding
- )
- )
- )
- else:
- self.buf.extend(list(csv.reader(StringIO(line), dialect=dia)))
- if self.encoding is not None:
- reader = UnicodeReader(
- f, dialect=dia, encoding=self.encoding, strict=True
- )
- else:
- reader = csv.reader(f, dialect=dia, strict=True)
+ # Note: self.encoding is irrelevant here
+ line_rdr = csv.reader(StringIO(line), dialect=dia)
+ self.buf.extend(list(line_rdr))
+
+ # Note: self.encoding is irrelevant here
+ reader = csv.reader(f, dialect=dia, strict=True)
else:
| By getting rid of these one-liners, the few places where they are used end up being simplified a bit. | https://api.github.com/repos/pandas-dev/pandas/pulls/30371 | 2019-12-20T02:36:44Z | 2019-12-20T18:46:53Z | 2019-12-20T18:46:53Z | 2019-12-20T18:47:14Z |
CLN: use stdlib Iterator instead of BaseIterator | diff --git a/pandas/io/common.py b/pandas/io/common.py
index c62de76286610..d682604cf7aab 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -2,6 +2,7 @@
import bz2
import codecs
+from collections.abc import Iterator
import gzip
from io import BufferedIOBase, BytesIO
import mmap
@@ -49,18 +50,6 @@
_VALID_URLS.discard("")
-class BaseIterator:
- """Subclass this and provide a "__next__()" method to obtain an iterator.
- Useful only when the object being iterated is non-reusable (e.g. OK for a
- parser, not for an in-memory table, yes for its iterator)."""
-
- def __iter__(self) -> "BaseIterator":
- return self
-
- def __next__(self):
- raise AbstractMethodError(self)
-
-
def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
@@ -515,7 +504,7 @@ def closed(self):
return self.fp is None
-class _MMapWrapper(BaseIterator):
+class _MMapWrapper(Iterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
@@ -552,7 +541,7 @@ def __next__(self) -> str:
return newline
-class UTF8Recoder(BaseIterator):
+class UTF8Recoder(Iterator):
"""
Iterator that reads an encoded stream and re-encodes the input to UTF-8
"""
@@ -566,7 +555,7 @@ def read(self, bytes: int = -1) -> bytes:
def readline(self) -> bytes:
return self.reader.readline().encode("utf-8")
- def next(self) -> bytes:
+ def __next__(self) -> bytes:
return next(self.reader).encode("utf-8")
def close(self):
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 14a272e15bc29..7444ebbaf27e3 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1,4 +1,5 @@
from collections import OrderedDict
+from collections.abc import Iterator
import functools
from io import StringIO
from itertools import islice
@@ -19,7 +20,6 @@
from pandas.core.reshape.concat import concat
from pandas.io.common import (
- BaseIterator,
get_filepath_or_buffer,
get_handle,
infer_compression,
@@ -616,7 +616,7 @@ def read_json(
return result
-class JsonReader(BaseIterator):
+class JsonReader(Iterator):
"""
JsonReader provides an interface for reading in a JSON file.
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 37cd36a2be3bc..6699575e61656 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -3,6 +3,7 @@
"""
from collections import defaultdict
+from collections.abc import Iterator
import csv
import datetime
from io import StringIO
@@ -62,7 +63,6 @@
from pandas.core.tools import datetimes as tools
from pandas.io.common import (
- BaseIterator,
UTF8Recoder,
get_filepath_or_buffer,
get_handle,
@@ -786,7 +786,7 @@ def read_fwf(
return _read(filepath_or_buffer, kwds)
-class TextFileReader(BaseIterator):
+class TextFileReader(Iterator):
"""
Passed dialect overrides any of the related parser options
@@ -3582,7 +3582,7 @@ def _get_col_names(colspec, columns):
return colnames
-class FixedWidthReader(BaseIterator):
+class FixedWidthReader(Iterator):
"""
A reader of fixed-width lines.
"""
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index eb57d703cd4d5..fe96b94e368e3 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -13,6 +13,7 @@
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
+from collections.abc import Iterator
from datetime import datetime
import struct
@@ -22,7 +23,7 @@
import pandas as pd
-from pandas.io.common import BaseIterator, get_filepath_or_buffer
+from pandas.io.common import get_filepath_or_buffer
from pandas.io.sas._sas import Parser
import pandas.io.sas.sas_constants as const
@@ -36,7 +37,7 @@ class _column:
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
-class SAS7BDATReader(BaseIterator):
+class SAS7BDATReader(Iterator):
"""
Read SAS files in SAS7BDAT format.
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 9aa8ed1dfeb5d..ccaee56383a5f 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -7,7 +7,7 @@
https://support.sas.com/techsup/technote/ts140.pdf
"""
-
+from collections.abc import Iterator
from datetime import datetime
from io import BytesIO
import struct
@@ -19,7 +19,7 @@
import pandas as pd
-from pandas.io.common import BaseIterator, get_filepath_or_buffer
+from pandas.io.common import get_filepath_or_buffer
_correct_line1 = (
"HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
@@ -251,7 +251,7 @@ def _parse_float_vec(vec):
return ieee
-class XportReader(BaseIterator):
+class XportReader(Iterator):
__doc__ = _xport_reader_doc
def __init__(
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 84dd302fc293f..fc54a1fa2370d 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -9,7 +9,7 @@
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
-
+from collections.abc import Iterator
import datetime
from io import BytesIO
import os
@@ -44,7 +44,7 @@
from pandas.core.frame import DataFrame
from pandas.core.series import Series
-from pandas.io.common import BaseIterator, get_filepath_or_buffer, stringify_path
+from pandas.io.common import get_filepath_or_buffer, stringify_path
_version_error = (
"Version of given Stata file is not 104, 105, 108, "
@@ -1010,7 +1010,7 @@ def __init__(self):
)
-class StataReader(StataParser, BaseIterator):
+class StataReader(StataParser, Iterator):
__doc__ = _stata_reader_doc
def __init__(
| https://api.github.com/repos/pandas-dev/pandas/pulls/30370 | 2019-12-20T02:12:24Z | 2019-12-23T09:15:09Z | 2019-12-23T09:15:09Z | 2019-12-24T22:21:45Z | |
Clean Up nogil Warning From Parsers | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 1b566af7a5437..5122bb3d4e75b 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -171,12 +171,9 @@ cdef extern from "parser/tokenizer.h":
int64_t skip_first_N_rows
int64_t skipfooter
# pick one, depending on whether the converter requires GIL
- float64_t (*double_converter_nogil)(const char *, char **,
- char, char, char,
- int, int *, int *) nogil
- float64_t (*double_converter_withgil)(const char *, char **,
- char, char, char,
- int, int *, int *)
+ float64_t (*double_converter)(const char *, char **,
+ char, char, char,
+ int, int *, int *) nogil
# error handling
char *warn_msg
@@ -469,16 +466,11 @@ cdef class TextReader:
if float_precision == "round_trip":
# see gh-15140
- #
- # Our current roundtrip implementation requires the GIL.
- self.parser.double_converter_nogil = NULL
- self.parser.double_converter_withgil = round_trip
+ self.parser.double_converter = round_trip
elif float_precision == "high":
- self.parser.double_converter_withgil = NULL
- self.parser.double_converter_nogil = precise_xstrtod
+ self.parser.double_converter = precise_xstrtod
else:
- self.parser.double_converter_withgil = NULL
- self.parser.double_converter_nogil = xstrtod
+ self.parser.double_converter = xstrtod
if isinstance(dtype, dict):
dtype = {k: pandas_dtype(dtype[k])
@@ -1663,22 +1655,12 @@ cdef _try_double(parser_t *parser, int64_t col,
result = np.empty(lines, dtype=np.float64)
data = <float64_t *>result.data
na_fset = kset_float64_from_list(na_flist)
- if parser.double_converter_nogil != NULL: # if it can run without the GIL
- with nogil:
- error = _try_double_nogil(parser, parser.double_converter_nogil,
- col, line_start, line_end,
- na_filter, na_hashset, use_na_flist,
- na_fset, NA, data, &na_count)
- else:
- assert parser.double_converter_withgil != NULL
- error = _try_double_nogil(parser,
- <float64_t (*)(const char *, char **,
- char, char, char,
- int, int *, int *)
- nogil>parser.double_converter_withgil,
+ with nogil:
+ error = _try_double_nogil(parser, parser.double_converter,
col, line_start, line_end,
na_filter, na_hashset, use_na_flist,
na_fset, NA, data, &na_count)
+
kh_destroy_float64(na_fset)
if error != 0:
return None, None
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index 9f2b26b0dea19..2188ff6b0d464 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -1774,11 +1774,18 @@ double precise_xstrtod(const char *str, char **endptr, char decimal,
double round_trip(const char *p, char **q, char decimal, char sci, char tsep,
int skip_trailing, int *error, int *maybe_int) {
+ // This is called from a nogil block in parsers.pyx
+ // so need to explicitly get GIL before Python calls
+ PyGILState_STATE gstate;
+ gstate = PyGILState_Ensure();
+
double r = PyOS_string_to_double(p, q, 0);
if (maybe_int != NULL) *maybe_int = 0;
if (PyErr_Occurred() != NULL) *error = -1;
else if (r == Py_HUGE_VAL) *error = (int)Py_HUGE_VAL;
PyErr_Clear();
+
+ PyGILState_Release(gstate);
return r;
}
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index b37de47662feb..4fd2065c07100 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -155,11 +155,8 @@ typedef struct parser_t {
PyObject *skipfunc;
int64_t skip_first_N_rows;
int64_t skip_footer;
- // pick one, depending on whether the converter requires GIL
- double (*double_converter_nogil)(const char *, char **,
- char, char, char, int, int *, int *);
- double (*double_converter_withgil)(const char *, char **,
- char, char, char, int, int *, int *);
+ double (*double_converter)(const char *, char **,
+ char, char, char, int, int *, int *);
// error handling
char *warn_msg;
@@ -226,6 +223,8 @@ double xstrtod(const char *p, char **q, char decimal, char sci, char tsep,
double precise_xstrtod(const char *p, char **q, char decimal,
char sci, char tsep, int skip_trailing,
int *error, int *maybe_int);
+
+// GH-15140 - round_trip requires and acquires the GIL on its own
double round_trip(const char *p, char **q, char decimal, char sci, char tsep,
int skip_trailing, int *error, int *maybe_int);
int to_boolean(const char *item, uint8_t *val);
| I think just easier to ensure The GIL is acquired within the C function rather than all of the current casting / indirection.
Eliminates the following warning:
```sh
warning: pandas/_libs/parsers.pyx:1656:34: Casting a GIL-requiring function into a nogil function circumvents GIL validation
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/30369 | 2019-12-20T01:47:44Z | 2020-01-02T01:06:23Z | 2020-01-02T01:06:23Z | 2020-01-16T00:33:26Z |
REF: de-privatize io.common functions used elsewhere | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 0159716248b11..7151a34cd37de 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -64,7 +64,7 @@ def __next__(self):
raise AbstractMethodError(self)
-def _is_url(url) -> bool:
+def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
@@ -102,7 +102,7 @@ def _expand_user(
return filepath_or_buffer
-def _validate_header_arg(header) -> None:
+def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
"Passing a bool to header is invalid. "
@@ -112,7 +112,7 @@ def _validate_header_arg(header) -> None:
)
-def _stringify_path(
+def stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""Attempt to convert a path-like object to a string.
@@ -193,9 +193,9 @@ def get_filepath_or_buffer(
compression, str,
should_close, bool)
"""
- filepath_or_buffer = _stringify_path(filepath_or_buffer)
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
- if isinstance(filepath_or_buffer, str) and _is_url(filepath_or_buffer):
+ if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
req = urlopen(filepath_or_buffer)
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
@@ -250,7 +250,7 @@ def file_path_to_url(path: str) -> str:
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
-def _get_compression_method(
+def get_compression_method(
compression: Optional[Union[str, Mapping[str, str]]]
) -> Tuple[Optional[str], Dict[str, str]]:
"""
@@ -283,7 +283,7 @@ def _get_compression_method(
return compression, compression_args
-def _infer_compression(
+def infer_compression(
filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
) -> Optional[str]:
"""
@@ -317,7 +317,7 @@ def _infer_compression(
# Infer compression
if compression == "infer":
# Convert all path types (e.g. pathlib.Path) to strings
- filepath_or_buffer = _stringify_path(filepath_or_buffer)
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
# Cannot infer compression of a buffer, assume no compression
return None
@@ -338,7 +338,7 @@ def _infer_compression(
raise ValueError(msg)
-def _get_handle(
+def get_handle(
path_or_buf,
mode: str,
encoding=None,
@@ -396,12 +396,12 @@ def _get_handle(
f = path_or_buf
# Convert pathlib.Path/py.path.local or string
- path_or_buf = _stringify_path(path_or_buf)
+ path_or_buf = stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, str)
- compression, compression_args = _get_compression_method(compression)
+ compression, compression_args = get_compression_method(compression)
if is_path:
- compression = _infer_compression(path_or_buf, compression)
+ compression = infer_compression(path_or_buf, compression)
if compression:
@@ -421,7 +421,7 @@ def _get_handle(
# ZIP Compression
elif compression == "zip":
- zf = BytesZipFile(path_or_buf, mode, **compression_args)
+ zf = _BytesZipFile(path_or_buf, mode, **compression_args)
# Ensure the container is closed as well.
handles.append(zf)
if zf.mode == "w":
@@ -472,7 +472,7 @@ def _get_handle(
if memory_map and hasattr(f, "fileno"):
try:
- wrapped = MMapWrapper(f)
+ wrapped = _MMapWrapper(f)
f.close()
f = wrapped
except Exception:
@@ -485,7 +485,7 @@ def _get_handle(
return f, handles
-class BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore
+class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore
"""
Wrapper for standard library class ZipFile and allow the returned file-like
handle to accept byte strings via `write` method.
@@ -518,7 +518,7 @@ def closed(self):
return self.fp is None
-class MMapWrapper(BaseIterator):
+class _MMapWrapper(BaseIterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
@@ -537,7 +537,7 @@ def __init__(self, f: IO):
def __getattr__(self, name: str):
return getattr(self.mmap, name)
- def __iter__(self) -> "MMapWrapper":
+ def __iter__(self) -> "_MMapWrapper":
return self
def __next__(self) -> str:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 8368142c3633a..553334407d12e 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -15,11 +15,11 @@
from pandas.core.frame import DataFrame
from pandas.io.common import (
- _is_url,
- _stringify_path,
- _validate_header_arg,
get_filepath_or_buffer,
+ is_url,
+ stringify_path,
urlopen,
+ validate_header_arg,
)
from pandas.io.excel._util import (
_fill_mi_header,
@@ -339,7 +339,7 @@ def read_excel(
class _BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer):
# If filepath_or_buffer is a url, load the data into a BytesIO
- if _is_url(filepath_or_buffer):
+ if is_url(filepath_or_buffer):
filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read())
elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer)
@@ -408,7 +408,7 @@ def parse(
**kwds,
):
- _validate_header_arg(header)
+ validate_header_arg(header)
ret_dict = False
@@ -708,7 +708,7 @@ def __init__(
self.mode = mode
def __fspath__(self):
- return _stringify_path(self.path)
+ return stringify_path(self.path)
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
@@ -808,7 +808,7 @@ def __init__(self, io, engine=None):
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
- self._io = _stringify_path(io)
+ self._io = stringify_path(io)
self._reader = self._engines[engine](self._io)
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 41bdf97c1fe1f..eb05004d9137c 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -4,7 +4,7 @@
from pandas import DataFrame, Int64Index, RangeIndex
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
def to_feather(df: DataFrame, path):
@@ -20,7 +20,7 @@ def to_feather(df: DataFrame, path):
import_optional_dependency("pyarrow")
from pyarrow import feather
- path = _stringify_path(path)
+ path = stringify_path(path)
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
@@ -98,6 +98,6 @@ def read_feather(path, columns=None, use_threads: bool = True):
import_optional_dependency("pyarrow")
from pyarrow import feather
- path = _stringify_path(path)
+ path = stringify_path(path)
return feather.read_feather(path, columns=columns, use_threads=bool(use_threads))
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index ae5d1d30bcddb..c0071028a8ef4 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -23,10 +23,10 @@
from pandas.io.common import (
UnicodeWriter,
- _get_compression_method,
- _get_handle,
- _infer_compression,
+ get_compression_method,
get_filepath_or_buffer,
+ get_handle,
+ infer_compression,
)
@@ -61,7 +61,7 @@ def __init__(
path_or_buf = StringIO()
# Extract compression mode as given, if dict
- compression, self.compression_args = _get_compression_method(compression)
+ compression, self.compression_args = get_compression_method(compression)
self.path_or_buf, _, _, _ = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression, mode=mode
@@ -78,7 +78,7 @@ def __init__(
if encoding is None:
encoding = "utf-8"
self.encoding = encoding
- self.compression = _infer_compression(self.path_or_buf, compression)
+ self.compression = infer_compression(self.path_or_buf, compression)
if quoting is None:
quoting = csvlib.QUOTE_MINIMAL
@@ -179,7 +179,7 @@ def save(self):
f = self.path_or_buf
close = False
else:
- f, handles = _get_handle(
+ f, handles = get_handle(
self.path_or_buf,
self.mode,
encoding=self.encoding,
@@ -212,7 +212,7 @@ def save(self):
else:
compression = dict(self.compression_args, method=self.compression)
- f, handles = _get_handle(
+ f, handles = get_handle(
self.path_or_buf,
self.mode,
encoding=self.encoding,
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 2f7a80eea1554..18340bc702378 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -15,6 +15,7 @@
from pandas import Index
import pandas.core.common as com
+from pandas.io.common import stringify_path
from pandas.io.formats.css import CSSResolver, CSSWarning
from pandas.io.formats.format import get_level_lengths
from pandas.io.formats.printing import pprint_thing
@@ -711,7 +712,6 @@ def write(
and ``io.excel.xlsm.writer``.
"""
from pandas.io.excel import ExcelWriter
- from pandas.io.common import _stringify_path
num_rows, num_cols = self.df.shape
if num_rows > self.max_rows or num_cols > self.max_cols:
@@ -724,7 +724,7 @@ def write(
if isinstance(writer, ExcelWriter):
need_save = False
else:
- writer = ExcelWriter(_stringify_path(writer), engine=engine)
+ writer = ExcelWriter(stringify_path(writer), engine=engine)
need_save = True
formatted_cells = self.get_formatted_cells()
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 124bd31c8d308..b0574925cf1b1 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -72,7 +72,7 @@
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
if TYPE_CHECKING:
@@ -482,7 +482,7 @@ def get_buffer(
objects, otherwise yield buf unchanged.
"""
if buf is not None:
- buf = _stringify_path(buf)
+ buf = stringify_path(buf)
else:
buf = StringIO()
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index 0c6b0c1a5810b..3a3347a5c86ea 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -12,7 +12,7 @@
from pandas import option_context
-from pandas.io.common import _is_url
+from pandas.io.common import is_url
from pandas.io.formats.format import (
DataFrameFormatter,
TableFormatter,
@@ -147,7 +147,7 @@ def _write_cell(
rs = pprint_thing(s, escape_chars=esc).strip()
- if self.render_links and _is_url(rs):
+ if self.render_links and is_url(rs):
rs_unescaped = pprint_thing(s, escape_chars={}).strip()
start_tag += '<a href="{url}" target="_blank">'.format(url=rs_unescaped)
end_a = "</a>"
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 3521bad375aa6..eafcca0e85bb3 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -16,7 +16,7 @@
from pandas.core.construction import create_series_with_explicit_dtype
-from pandas.io.common import _is_url, _validate_header_arg, urlopen
+from pandas.io.common import is_url, urlopen, validate_header_arg
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
@@ -117,7 +117,7 @@ def _read(obj):
-------
raw_text : str
"""
- if _is_url(obj):
+ if is_url(obj):
with urlopen(obj) as url:
text = url.read()
elif hasattr(obj, "read"):
@@ -705,7 +705,7 @@ def _build_doc(self):
parser = HTMLParser(recover=True, encoding=self.encoding)
try:
- if _is_url(self.io):
+ if is_url(self.io):
with urlopen(self.io) as f:
r = parse(f, parser=parser)
else:
@@ -717,7 +717,7 @@ def _build_doc(self):
pass
except (UnicodeDecodeError, IOError) as e:
# if the input is a blob of html goop
- if not _is_url(self.io):
+ if not is_url(self.io):
r = fromstring(self.io, parser=parser)
try:
@@ -1076,7 +1076,7 @@ def read_html(
"cannot skip rows starting from the end of the "
"data (you passed a negative value)"
)
- _validate_header_arg(header)
+ validate_header_arg(header)
return _parse(
flavor=flavor,
io=io,
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 6cb811bb97755..14a272e15bc29 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -20,10 +20,10 @@
from pandas.io.common import (
BaseIterator,
- _get_handle,
- _infer_compression,
- _stringify_path,
get_filepath_or_buffer,
+ get_handle,
+ infer_compression,
+ stringify_path,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import _validate_integer
@@ -58,7 +58,7 @@ def to_json(
"'index=False' is only valid when 'orient' is " "'split' or 'table'"
)
- path_or_buf = _stringify_path(path_or_buf)
+ path_or_buf = stringify_path(path_or_buf)
if lines and orient != "records":
raise ValueError("'lines' keyword only valid when 'orient' is records")
@@ -91,7 +91,7 @@ def to_json(
s = convert_to_line_delimits(s)
if isinstance(path_or_buf, str):
- fh, handles = _get_handle(path_or_buf, "w", compression=compression)
+ fh, handles = get_handle(path_or_buf, "w", compression=compression)
try:
fh.write(s)
finally:
@@ -584,7 +584,7 @@ def read_json(
if encoding is None:
encoding = "utf-8"
- compression = _infer_compression(path_or_buf, compression)
+ compression = infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression
)
@@ -704,7 +704,7 @@ def _get_data_from_filepath(self, filepath_or_buffer):
pass
if exists or self.compression is not None:
- data, _ = _get_handle(
+ data, _ = get_handle(
filepath_or_buffer,
"r",
encoding=self.encoding,
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 32d812637a067..cc3d2bd12ca35 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -35,6 +35,7 @@
is_categorical_dtype,
is_dtype_equal,
is_extension_array_dtype,
+ is_file_like,
is_float,
is_integer,
is_integer_dtype,
@@ -64,11 +65,10 @@
BaseIterator,
UnicodeReader,
UTF8Recoder,
- _get_handle,
- _infer_compression,
- _validate_header_arg,
get_filepath_or_buffer,
- is_file_like,
+ get_handle,
+ infer_compression,
+ validate_header_arg,
)
from pandas.io.date_converters import generic_parser
@@ -426,7 +426,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
kwds["encoding"] = encoding
compression = kwds.get("compression", "infer")
- compression = _infer_compression(filepath_or_buffer, compression)
+ compression = infer_compression(filepath_or_buffer, compression)
# TODO: get_filepath_or_buffer could return
# Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
@@ -1050,7 +1050,7 @@ def _clean_options(self, options, engine):
na_values = options["na_values"]
skiprows = options["skiprows"]
- _validate_header_arg(options["header"])
+ validate_header_arg(options["header"])
depr_warning = ""
@@ -2283,7 +2283,7 @@ def __init__(self, f, **kwds):
self.comment = kwds["comment"]
self._comment_lines = []
- f, handles = _get_handle(
+ f, handles = get_handle(
f,
"r",
encoding=self.encoding,
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 0a0ccedd78f00..6ce52da21b4e8 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -4,7 +4,7 @@
from pandas.compat import pickle_compat as pc
-from pandas.io.common import _get_handle, _stringify_path
+from pandas.io.common import get_handle, stringify_path
def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
@@ -63,8 +63,8 @@ def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
>>> import os
>>> os.remove("./dummy.pkl")
"""
- path = _stringify_path(path)
- f, fh = _get_handle(path, "wb", compression=compression, is_text=False)
+ path = stringify_path(path)
+ f, fh = get_handle(path, "wb", compression=compression, is_text=False)
if protocol < 0:
protocol = pickle.HIGHEST_PROTOCOL
try:
@@ -134,8 +134,8 @@ def read_pickle(path, compression="infer"):
>>> import os
>>> os.remove("./dummy.pkl")
"""
- path = _stringify_path(path)
- f, fh = _get_handle(path, "rb", compression=compression, is_text=False)
+ path = stringify_path(path)
+ f, fh = get_handle(path, "rb", compression=compression, is_text=False)
# 1) try standard library Pickle
# 2) try pickle_compat (older pandas version) to handle subclass changes
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d14b4ecf070a7..8e0ab27c1fa85 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -63,7 +63,7 @@
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.indexes.api import ensure_index
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
@@ -274,7 +274,7 @@ def to_hdf(
encoding=encoding,
)
- path_or_buf = _stringify_path(path_or_buf)
+ path_or_buf = stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
with HDFStore(
path_or_buf, mode=mode, complevel=complevel, complib=complib
@@ -379,7 +379,7 @@ def read_hdf(
store = path_or_buf
auto_close = False
else:
- path_or_buf = _stringify_path(path_or_buf)
+ path_or_buf = stringify_path(path_or_buf)
if not isinstance(path_or_buf, str):
raise NotImplementedError(
"Support for generic buffers has not been implemented."
@@ -525,7 +525,7 @@ def __init__(
if complib is None and complevel is not None:
complib = tables.filters.default_complib
- self._path = _stringify_path(path)
+ self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 6bd3532d538c7..56ebb583bc2f9 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -1,7 +1,7 @@
"""
Read SAS sas7bdat or xport files.
"""
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
def read_sas(
@@ -52,7 +52,7 @@ def read_sas(
"than a string name, you must specify "
"a format string"
)
- filepath_or_buffer = _stringify_path(filepath_or_buffer)
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
raise ValueError(buffer_error_msg)
fname = filepath_or_buffer.lower()
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index dbe64e4c0f06d..84dd302fc293f 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -44,7 +44,7 @@
from pandas.core.frame import DataFrame
from pandas.core.series import Series
-from pandas.io.common import BaseIterator, _stringify_path, get_filepath_or_buffer
+from pandas.io.common import BaseIterator, get_filepath_or_buffer, stringify_path
_version_error = (
"Version of given Stata file is not 104, 105, 108, "
@@ -1051,7 +1051,7 @@ def __init__(
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
- path_or_buf = _stringify_path(path_or_buf)
+ path_or_buf = stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _, should_close = get_filepath_or_buffer(path_or_buf)
@@ -2112,7 +2112,7 @@ def __init__(
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
- self._fname = _stringify_path(fname)
+ self._fname = stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names = {}
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index ad058faff96e7..5c39dcc1a7659 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -21,7 +21,7 @@
import pandas.core.common as com
import pandas.util.testing as tm
-from pandas.io.common import _get_handle
+from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
@@ -1065,7 +1065,7 @@ def test_to_csv_compression(self, df, encoding, compression):
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
- f, _handles = _get_handle(
+ f, _handles = get_handle(
filename, "w", compression=compression, encoding=encoding
)
with f:
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index d2633ea0676cd..f4efbbeda6311 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -68,9 +68,9 @@ def test_expand_user_normal_path(self):
assert os.path.expanduser(filename) == expanded_name
def test_stringify_path_pathlib(self):
- rel_path = icom._stringify_path(Path("."))
+ rel_path = icom.stringify_path(Path("."))
assert rel_path == "."
- redundant_path = icom._stringify_path(Path("foo//bar"))
+ redundant_path = icom.stringify_path(Path("foo//bar"))
assert redundant_path == os.path.join("foo", "bar")
@td.skip_if_no("py.path")
@@ -78,11 +78,11 @@ def test_stringify_path_localpath(self):
path = os.path.join("foo", "bar")
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
- assert icom._stringify_path(lpath) == abs_path
+ assert icom.stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath("foo/bar.csv")
- result = icom._stringify_path(p)
+ result = icom.stringify_path(p)
assert result == "foo/bar.csv"
@pytest.mark.parametrize(
@@ -92,7 +92,7 @@ def test_stringify_path_fspath(self):
@pytest.mark.parametrize("path_type", path_types)
def test_infer_compression_from_path(self, extension, expected, path_type):
path = path_type("foo/bar.csv" + extension)
- compression = icom._infer_compression(path, compression="infer")
+ compression = icom.infer_compression(path, compression="infer")
assert compression == expected
def test_get_filepath_or_buffer_with_path(self):
@@ -313,18 +313,18 @@ def test_constructor_bad_file(self, mmap_file):
err = mmap.error
with pytest.raises(err, match=msg):
- icom.MMapWrapper(non_file)
+ icom._MMapWrapper(non_file)
target = open(mmap_file, "r")
target.close()
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
- icom.MMapWrapper(target)
+ icom._MMapWrapper(target)
def test_get_attr(self, mmap_file):
with open(mmap_file, "r") as target:
- wrapper = icom.MMapWrapper(target)
+ wrapper = icom._MMapWrapper(target)
attrs = dir(wrapper.mmap)
attrs = [attr for attr in attrs if not attr.startswith("__")]
@@ -337,7 +337,7 @@ def test_get_attr(self, mmap_file):
def test_next(self, mmap_file):
with open(mmap_file, "r") as target:
- wrapper = icom.MMapWrapper(target)
+ wrapper = icom._MMapWrapper(target)
lines = target.readlines()
for line in lines:
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 54eb2d78fb64f..e17a32cbc8b68 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -44,14 +44,14 @@ def test_compression_size(obj, method, compression_only):
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
- f, handles = icom._get_handle(path, "w", compression=compression_only)
+ f, handles = icom.get_handle(path, "w", compression=compression_only)
with f:
getattr(obj, method)(f)
assert not f.closed
assert f.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
- f, handles = icom._get_handle(path, "w", compression=None)
+ f, handles = icom.get_handle(path, "w", compression=None)
with f:
getattr(obj, method)(f)
assert not f.closed
@@ -108,7 +108,7 @@ def test_compression_warning(compression_only):
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
- f, handles = icom._get_handle(path, "w", compression=compression_only)
+ f, handles = icom.get_handle(path, "w", compression=compression_only)
with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
with f:
df.to_csv(f, compression=compression_only)
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 9041d582b19ca..b48c79000c98d 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -9,7 +9,7 @@
from pandas import DataFrame, Series
import pandas.util.testing as tm
-from pandas.io.common import _get_handle
+from pandas.io.common import get_handle
class TestSeriesToCSV:
@@ -143,7 +143,7 @@ def test_to_csv_compression(self, s, encoding, compression):
tm.assert_series_equal(s, result)
# test the round trip using file handle - to_csv -> read_csv
- f, _handles = _get_handle(
+ f, _handles = get_handle(
filename, "w", compression=compression, encoding=encoding
)
with f:
| https://api.github.com/repos/pandas-dev/pandas/pulls/30368 | 2019-12-20T00:19:18Z | 2019-12-20T17:44:53Z | 2019-12-20T17:44:53Z | 2019-12-20T17:45:19Z | |
CLN: replace str.format() with f-string | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 394d128164509..43221eb1e3194 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1740,7 +1740,7 @@ def to_records(self, index=True, column_dtypes=None, index_dtypes=None):
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
- >>> index_dtypes = "<S{}".format(df.index.str.len().max())
+ >>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
@@ -2340,13 +2340,9 @@ def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
- return "{num:3.1f}{size_q} {x}".format(
- num=num, size_q=size_qualifier, x=x
- )
+ return f"{num:3.1f}{size_qualifier} {x}"
num /= 1024.0
- return "{num:3.1f}{size_q} {pb}".format(
- num=num, size_q=size_qualifier, pb="PB"
- )
+ return f"{num:3.1f}{size_qualifier} PB"
if verbose:
_verbose_repr()
@@ -2359,7 +2355,7 @@ def _sizeof_fmt(num, size_qualifier):
_verbose_repr()
counts = self._data.get_dtype_counts()
- dtypes = ["{k}({kk:d})".format(k=k[0], kk=k[1]) for k in sorted(counts.items())]
+ dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())]
lines.append(f"dtypes: {', '.join(dtypes)}")
if memory_usage is None:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d83a60ffc1c7c..03bd1b331ec30 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -117,9 +117,8 @@ def _single_replace(self, to_replace, method, inplace, limit):
"""
if self.ndim != 1:
raise TypeError(
- "cannot replace {0} with method {1} on a {2}".format(
- to_replace, method, type(self).__name__
- )
+ f"cannot replace {to_replace} with method {method} on a "
+ f"{type(self).__name__}"
)
orig_dtype = self.dtype
@@ -254,7 +253,7 @@ def _validate_dtype(self, dtype):
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented"
- " in the {0} constructor".format(type(self).__name__)
+ f" in the {type(self).__name__} constructor"
)
return dtype
@@ -396,7 +395,7 @@ def _get_axis_number(cls, axis):
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
- raise ValueError("No axis named {0} for object type {1}".format(axis, cls))
+ raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
@@ -409,7 +408,7 @@ def _get_axis_name(cls, axis):
return cls._AXIS_NAMES[axis]
except KeyError:
pass
- raise ValueError("No axis named {0} for object type {1}".format(axis, cls))
+ raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
@@ -437,7 +436,7 @@ def _get_axis_resolvers(self, axis):
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiiindex
- key = "{prefix}level_{i}".format(prefix=prefix, i=i)
+ key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
@@ -1082,7 +1081,7 @@ def rename(self, *args, **kwargs):
if kwargs:
raise TypeError(
"rename() got an unexpected keyword "
- 'argument "{0}"'.format(list(kwargs.keys())[0])
+ f'argument "{list(kwargs.keys())[0]}"'
)
if com.count_not_none(*axes.values()) == 0:
@@ -1108,7 +1107,7 @@ def rename(self, *args, **kwargs):
missing_labels = [
label for index, label in enumerate(v) if indexer[index] == -1
]
- raise KeyError("{} not found in axis".format(missing_labels))
+ raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
@@ -1257,7 +1256,7 @@ class name
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
- 'argument "{0}"'.format(list(kwargs.keys())[0])
+ f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -1461,9 +1460,7 @@ def __neg__(self):
):
arr = operator.neg(values)
else:
- raise TypeError(
- "Unary negative expects numeric dtype, not {}".format(values.dtype)
- )
+ raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
@@ -1477,9 +1474,7 @@ def __pos__(self):
):
arr = operator.pos(values)
else:
- raise TypeError(
- "Unary plus expects numeric dtype, not {}".format(values.dtype)
- )
+ raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
@@ -1492,10 +1487,8 @@ def __invert__(self):
def __nonzero__(self):
raise ValueError(
- "The truth value of a {0} is ambiguous. "
- "Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
- type(self).__name__
- )
+ f"The truth value of a {type(self).__name__} is ambiguous. "
+ "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
@@ -1519,7 +1512,7 @@ def bool(self):
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
- "{0}".format(type(self).__name__)
+ f"{type(self).__name__}"
)
self.__nonzero__()
@@ -1659,14 +1652,8 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
)
msg = (
- "'{key}' is both {level_article} {level_type} level and "
- "{label_article} {label_type} label, which is ambiguous."
- ).format(
- key=key,
- level_article=level_article,
- level_type=level_type,
- label_article=label_article,
- label_type=label_type,
+ f"'{key}' is both {level_article} {level_type} level and "
+ f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
@@ -1731,12 +1718,8 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
- "The {label_axis_name} label '{key}' "
- "is not unique.{multi_message}"
- ).format(
- key=key,
- label_axis_name=label_axis_name,
- multi_message=multi_message,
+ f"The {label_axis_name} label '{key}' "
+ f"is not unique.{multi_message}"
)
)
@@ -1780,8 +1763,8 @@ def _drop_labels_or_levels(self, keys, axis: int = 0):
raise ValueError(
(
"The following keys are not valid labels or "
- "levels for axis {axis}: {invalid_keys}"
- ).format(axis=axis, invalid_keys=invalid_keys)
+ f"levels for axis {axis}: {invalid_keys}"
+ )
)
# Compute levels and labels to drop
@@ -1998,7 +1981,7 @@ def __setstate__(self, state):
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
- prepr = "[%s]" % ",".join(map(pprint_thing, self))
+ prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
@@ -3946,13 +3929,13 @@ def _drop_axis(self, labels, axis, level=None, errors: str = "raise"):
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
- raise KeyError("{} not found in axis".format(labels))
+ raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
- raise KeyError("{} not found in axis".format(labels))
+ raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
@@ -4476,7 +4459,7 @@ def reindex(self, *args, **kwargs):
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
- 'argument "{0}"'.format(list(kwargs.keys())[0])
+ f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
@@ -5997,7 +5980,7 @@ def fillna(
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
- '"{0}"'.format(type(value).__name__)
+ f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
@@ -6781,9 +6764,9 @@ def interpolate(
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
- "using {method} method other than linear. "
+ f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
- "interpolating.".format(method=method)
+ "interpolating."
)
if isna(index).any():
@@ -9205,7 +9188,7 @@ def _tz_convert(ax, tz):
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
- raise ValueError("The level {0} is not valid".format(level))
+ raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
@@ -9375,7 +9358,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent):
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
- raise ValueError("The level {0} is not valid".format(level))
+ raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
@@ -10357,8 +10340,8 @@ def last_valid_index(self):
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
- axis_descr = "{%s}" % ", ".join(
- "{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)
+ axis_descr = (
+ f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 24e2e674f6ae3..1fee154778f83 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -113,7 +113,7 @@ def cat_safe(list_of_columns: List, sep: str):
raise TypeError(
"Concatenation requires list-likes containing only "
"strings (or missing values). Offending values found in "
- "column {}".format(dtype)
+ f"column {dtype}"
) from None
return result
@@ -1350,8 +1350,8 @@ def str_find(arr, sub, start=0, end=None, side="left"):
"""
if not isinstance(sub, str):
- msg = "expected a string object, not {0}"
- raise TypeError(msg.format(type(sub).__name__))
+ msg = f"expected a string object, not {type(sub).__name__}"
+ raise TypeError(msg)
if side == "left":
method = "find"
@@ -1370,8 +1370,8 @@ def str_find(arr, sub, start=0, end=None, side="left"):
def str_index(arr, sub, start=0, end=None, side="left"):
if not isinstance(sub, str):
- msg = "expected a string object, not {0}"
- raise TypeError(msg.format(type(sub).__name__))
+ msg = f"expected a string object, not {type(sub).__name__}"
+ raise TypeError(msg)
if side == "left":
method = "index"
@@ -1442,15 +1442,15 @@ def str_pad(arr, width, side="left", fillchar=" "):
dtype: object
"""
if not isinstance(fillchar, str):
- msg = "fillchar must be a character, not {0}"
- raise TypeError(msg.format(type(fillchar).__name__))
+ msg = f"fillchar must be a character, not {type(fillchar).__name__}"
+ raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
- msg = "width must be of integer type, not {0}"
- raise TypeError(msg.format(type(width).__name__))
+ msg = f"width must be of integer type, not {type(width).__name__}"
+ raise TypeError(msg)
if side == "left":
f = lambda x: x.rjust(width, fillchar)
| - [x] ref #29547
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30363 | 2019-12-19T21:43:26Z | 2019-12-20T02:28:05Z | 2019-12-20T02:28:05Z | 2019-12-20T07:28:44Z |
BUG: raise KeyError if MultiIndex.get_loc_level is asked unused label | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index ea0677a0edf28..997ab8f50e07c 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -622,8 +622,8 @@ Missing
MultiIndex
^^^^^^^^^^
-- Removed compatibility for MultiIndex pickles prior to version 0.8.0; compatibility with MultiIndex pickles from version 0.13 forward is maintained (:issue:`21654`)
--
+- Removed compatibility for :class:`MultiIndex` pickles prior to version 0.8.0; compatibility with :class:`MultiIndex` pickles from version 0.13 forward is maintained (:issue:`21654`)
+- :meth:`MultiIndex.get_loc_level` (and as a consequence, ``.loc`` on a :class:``MultiIndex``ed object) will now raise a ``KeyError``, rather than returning an empty ``slice``, if asked a label which is present in the ``levels`` but is unused (:issue:`22221`)
-
I/O
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 2a97c37449e12..37f4415776b83 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2185,11 +2185,6 @@ def _maybe_to_slice(loc):
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
-
- # _get_level_indexer returns an empty slice if the key has
- # been dropped from the MultiIndex
- if isinstance(loc, slice) and loc.start == loc.stop:
- raise KeyError(key)
return _maybe_to_slice(loc)
keylen = len(key)
@@ -2443,14 +2438,21 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
else:
- loc = level_index.get_loc(key)
- if isinstance(loc, slice):
- return loc
- elif level > 0 or self.lexsort_depth == 0:
- return np.array(labels == loc, dtype=bool)
+ code = level_index.get_loc(key)
+
+ if level > 0 or self.lexsort_depth == 0:
+ # Desired level is not sorted
+ locs = np.array(labels == code, dtype=bool, copy=False)
+ if not locs.any():
+ # The label is present in self.levels[level] but unused:
+ raise KeyError(key)
+ return locs
- i = labels.searchsorted(loc, side='left')
- j = labels.searchsorted(loc, side='right')
+ i = labels.searchsorted(code, side='left')
+ j = labels.searchsorted(code, side='right')
+ if i == j:
+ # The label is present in self.levels[level] but unused:
+ raise KeyError(key)
return slice(i, j)
def get_locs(self, seq):
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 7c90d359a4054..3bc5e51ca046a 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -271,10 +271,7 @@ def test_apply_chunk_view():
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': compat.lrange(9)})
- # return view
- f = lambda x: x[:2]
-
- result = df.groupby('key', group_keys=False).apply(f)
+ result = df.groupby('key', group_keys=False).apply(lambda x: x[:2])
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
@@ -518,6 +515,19 @@ def test_func(x):
tm.assert_frame_equal(result2, expected2)
+def test_groupby_apply_return_empty_chunk():
+ # GH 22221: apply filter which returns some empty groups
+ df = pd.DataFrame(dict(value=[0, 1], group=['filled', 'empty']))
+ groups = df.groupby('group')
+ result = groups.apply(lambda group: group[group.value != 1]['value'])
+ expected = pd.Series([0], name='value',
+ index=MultiIndex.from_product([['empty', 'filled'],
+ [0]],
+ names=['group', None]
+ ).drop('empty'))
+ tm.assert_series_equal(result, expected)
+
+
def test_apply_with_mixed_types():
# gh-20949
df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1, 2, 3], 'C': [4, 6, 5]})
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index ebd50909bae98..9ec11f1f42b9a 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -271,6 +271,10 @@ def test_get_loc_level():
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
+ # GH 22221: unused label
+ pytest.raises(KeyError, index.drop(2).get_loc_level, 2)
+ # Unused label on unsorted level:
+ pytest.raises(KeyError, index.drop(1, level=2).get_loc_level, 2, 2)
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
| - [x] closes #22221
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22230 | 2018-08-07T08:30:52Z | 2018-08-09T00:29:46Z | 2018-08-09T00:29:45Z | 2018-08-09T05:43:34Z |
ENH: Add strings_as_fixed_length parameter for df.to_records() (#18146) | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index b4331aab3085f..cca15f26cbf99 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -411,6 +411,7 @@ Other Enhancements
- :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` can write mixed sting columns to Stata strl format (:issue:`23633`)
- :meth:`DataFrame.between_time` and :meth:`DataFrame.at_time` have gained the ``axis`` parameter (:issue:`8839`)
- The ``scatter_matrix``, ``andrews_curves``, ``parallel_coordinates``, ``lag_plot``, ``autocorrelation_plot``, ``bootstrap_plot``, and ``radviz`` plots from the ``pandas.plotting`` module are now accessible from calling :meth:`DataFrame.plot` (:issue:`11978`)
+- :meth:`DataFrame.to_records` now accepts ``index_dtypes`` and ``column_dtypes`` parameters to allow different data types in stored column and index records (:issue:`18146`)
- :class:`IntervalIndex` has gained the :attr:`~IntervalIndex.is_overlapping` attribute to indicate if the ``IntervalIndex`` contains any overlapping intervals (:issue:`23309`)
- :func:`pandas.DataFrame.to_sql` has gained the ``method`` argument to control SQL insertion clause. See the :ref:`insertion method <io.sql.method>` section in the documentation. (:issue:`8953`)
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 241a1b471f677..b11542622451c 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -398,8 +398,11 @@ def is_dict_like(obj):
>>> is_dict_like([1, 2, 3])
False
"""
+ for attr in ("__getitem__", "keys", "__contains__"):
+ if not hasattr(obj, attr):
+ return False
- return hasattr(obj, '__getitem__') and hasattr(obj, 'keys')
+ return True
def is_named_tuple(obj):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 99ae551d3c55b..99653248216f5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -35,7 +35,6 @@
OrderedDict, PY36, raise_with_traceback,
string_and_binary_types)
from pandas.compat.numpy import function as nv
-
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
@@ -49,6 +48,7 @@
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
+ is_dict_like,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
@@ -1540,7 +1540,8 @@ def from_records(cls, data, index=None, exclude=None, columns=None,
return cls(mgr)
- def to_records(self, index=True, convert_datetime64=None):
+ def to_records(self, index=True, convert_datetime64=None,
+ column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
@@ -1557,6 +1558,20 @@ def to_records(self, index=True, convert_datetime64=None):
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
+ column_dtypes : str, type, dict, default None
+ .. versionadded:: 0.24.0
+
+ If a string or type, the data type to store all columns. If
+ a dictionary, a mapping of column names and indices (zero-indexed)
+ to specific data types.
+ index_dtypes : str, type, dict, default None
+ .. versionadded:: 0.24.0
+
+ If a string or type, the data type to store all index levels. If
+ a dictionary, a mapping of index level names and indices
+ (zero-indexed) to specific data types.
+
+ This mapping is applied only if `index=True`.
Returns
-------
@@ -1598,6 +1613,23 @@ def to_records(self, index=True, convert_datetime64=None):
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
+
+ Data types can be specified for the columns:
+
+ >>> df.to_records(column_dtypes={"A": "int32"})
+ rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
+ dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
+
+ As well as for the index:
+
+ >>> df.to_records(index_dtypes="<S2")
+ rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
+ dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
+
+ >>> index_dtypes = "<S{}".format(df.index.str.len().max())
+ >>> df.to_records(index_dtypes=index_dtypes)
+ rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
+ dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
@@ -1620,6 +1652,7 @@ def to_records(self, index=True, convert_datetime64=None):
count = 0
index_names = list(self.index.names)
+
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
@@ -1627,13 +1660,66 @@ def to_records(self, index=True, convert_datetime64=None):
count += 1
elif index_names[0] is None:
index_names = ['index']
+
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
+ index_names = []
+
+ index_len = len(index_names)
+ formats = []
+
+ for i, v in enumerate(arrays):
+ index = i
+
+ # When the names and arrays are collected, we
+ # first collect those in the DataFrame's index,
+ # followed by those in its columns.
+ #
+ # Thus, the total length of the array is:
+ # len(index_names) + len(DataFrame.columns).
+ #
+ # This check allows us to see whether we are
+ # handling a name / array in the index or column.
+ if index < index_len:
+ dtype_mapping = index_dtypes
+ name = index_names[index]
+ else:
+ index -= index_len
+ dtype_mapping = column_dtypes
+ name = self.columns[index]
+
+ # We have a dictionary, so we get the data type
+ # associated with the index or column (which can
+ # be denoted by its name in the DataFrame or its
+ # position in DataFrame's array of indices or
+ # columns, whichever is applicable.
+ if is_dict_like(dtype_mapping):
+ if name in dtype_mapping:
+ dtype_mapping = dtype_mapping[name]
+ elif index in dtype_mapping:
+ dtype_mapping = dtype_mapping[index]
+ else:
+ dtype_mapping = None
+
+ # If no mapping can be found, use the array's
+ # dtype attribute for formatting.
+ #
+ # A valid dtype must either be a type or
+ # string naming a type.
+ if dtype_mapping is None:
+ formats.append(v.dtype)
+ elif isinstance(dtype_mapping, (type, compat.string_types)):
+ formats.append(dtype_mapping)
+ else:
+ element = "row" if i < index_len else "column"
+ msg = ("Invalid dtype {dtype} specified for "
+ "{element} {name}").format(dtype=dtype_mapping,
+ element=element, name=name)
+ raise ValueError(msg)
- formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 0c22b595bc74d..d9b1b0db90562 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -178,6 +178,33 @@ def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
+@pytest.mark.parametrize("has_keys", [True, False])
+@pytest.mark.parametrize("has_getitem", [True, False])
+@pytest.mark.parametrize("has_contains", [True, False])
+def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
+ class DictLike(object):
+ def __init__(self, d):
+ self.d = d
+
+ if has_keys:
+ def keys(self):
+ return self.d.keys()
+
+ if has_getitem:
+ def __getitem__(self, key):
+ return self.d.__getitem__(key)
+
+ if has_contains:
+ def __contains__(self, key):
+ return self.d.__contains__(key)
+
+ d = DictLike({1: 2})
+ result = inference.is_dict_like(d)
+ expected = has_keys and has_getitem and has_contains
+
+ assert result is expected
+
+
def test_is_file_like(mock):
class MockFile(object):
pass
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index f1eb6a33eddeb..b875559169205 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -191,6 +191,157 @@ def test_to_records_with_categorical(self):
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
+ @pytest.mark.parametrize("kwargs,expected", [
+ # No dtypes --> default to array dtypes.
+ (dict(),
+ np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
+ dtype=[("index", "<i8"), ("A", "<i8"),
+ ("B", "<f8"), ("C", "O")])),
+
+ # Should have no effect in this case.
+ (dict(index=True),
+ np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
+ dtype=[("index", "<i8"), ("A", "<i8"),
+ ("B", "<f8"), ("C", "O")])),
+
+ # Column dtype applied across the board. Index unaffected.
+ (dict(column_dtypes="<U4"),
+ np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
+ dtype=[("index", "<i8"), ("A", "<U4"),
+ ("B", "<U4"), ("C", "<U4")])),
+
+ # Index dtype applied across the board. Columns unaffected.
+ (dict(index_dtypes="<U1"),
+ np.rec.array([("0", 1, 0.2, "a"), ("1", 2, 1.5, "bc")],
+ dtype=[("index", "<U1"), ("A", "<i8"),
+ ("B", "<f8"), ("C", "O")])),
+
+ # Pass in a type instance.
+ (dict(column_dtypes=np.unicode),
+ np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
+ dtype=[("index", "<i8"), ("A", "<U"),
+ ("B", "<U"), ("C", "<U")])),
+
+ # Pass in a dictionary (name-only).
+ (dict(column_dtypes={"A": np.int8, "B": np.float32, "C": "<U2"}),
+ np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
+ dtype=[("index", "<i8"), ("A", "i1"),
+ ("B", "<f4"), ("C", "<U2")])),
+
+ # Pass in a dictionary (indices-only).
+ (dict(index_dtypes={0: "int16"}),
+ np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
+ dtype=[("index", "i2"), ("A", "<i8"),
+ ("B", "<f8"), ("C", "O")])),
+
+ # Ignore index mappings if index is not True.
+ (dict(index=False, index_dtypes="<U2"),
+ np.rec.array([(1, 0.2, "a"), (2, 1.5, "bc")],
+ dtype=[("A", "<i8"), ("B", "<f8"), ("C", "O")])),
+
+ # Non-existent names / indices in mapping should not error.
+ (dict(index_dtypes={0: "int16", "not-there": "float32"}),
+ np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
+ dtype=[("index", "i2"), ("A", "<i8"),
+ ("B", "<f8"), ("C", "O")])),
+
+ # Names / indices not in mapping default to array dtype.
+ (dict(column_dtypes={"A": np.int8, "B": np.float32}),
+ np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
+ dtype=[("index", "<i8"), ("A", "i1"),
+ ("B", "<f4"), ("C", "O")])),
+
+ # Mixture of everything.
+ (dict(column_dtypes={"A": np.int8, "B": np.float32},
+ index_dtypes="<U2"),
+ np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
+ dtype=[("index", "<U2"), ("A", "i1"),
+ ("B", "<f4"), ("C", "O")])),
+
+ # Invalid dype values.
+ (dict(index=False, column_dtypes=list()),
+ "Invalid dtype \\[\\] specified for column A"),
+
+ (dict(index=False, column_dtypes={"A": "int32", "B": 5}),
+ "Invalid dtype 5 specified for column B"),
+ ])
+ def test_to_records_dtype(self, kwargs, expected):
+ # see gh-18146
+ df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
+
+ if isinstance(expected, str):
+ with pytest.raises(ValueError, match=expected):
+ df.to_records(**kwargs)
+ else:
+ result = df.to_records(**kwargs)
+ tm.assert_almost_equal(result, expected)
+
+ @pytest.mark.parametrize("df,kwargs,expected", [
+ # MultiIndex in the index.
+ (DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=list("abc")).set_index(["a", "b"]),
+ dict(column_dtypes="float64", index_dtypes={0: "int32", 1: "int8"}),
+ np.rec.array([(1, 2, 3.), (4, 5, 6.), (7, 8, 9.)],
+ dtype=[("a", "<i4"), ("b", "i1"), ("c", "<f8")])),
+
+ # MultiIndex in the columns.
+ (DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=MultiIndex.from_tuples([("a", "d"), ("b", "e"),
+ ("c", "f")])),
+ dict(column_dtypes={0: "<U1", 2: "float32"}, index_dtypes="float32"),
+ np.rec.array([(0., u"1", 2, 3.), (1., u"4", 5, 6.),
+ (2., u"7", 8, 9.)],
+ dtype=[("index", "<f4"),
+ ("('a', 'd')", "<U1"),
+ ("('b', 'e')", "<i8"),
+ ("('c', 'f')", "<f4")])),
+
+ # MultiIndex in both the columns and index.
+ (DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=MultiIndex.from_tuples([
+ ("a", "d"), ("b", "e"), ("c", "f")], names=list("ab")),
+ index=MultiIndex.from_tuples([
+ ("d", -4), ("d", -5), ("f", -6)], names=list("cd"))),
+ dict(column_dtypes="float64", index_dtypes={0: "<U2", 1: "int8"}),
+ np.rec.array([("d", -4, 1., 2., 3.), ("d", -5, 4., 5., 6.),
+ ("f", -6, 7, 8, 9.)],
+ dtype=[("c", "<U2"), ("d", "i1"),
+ ("('a', 'd')", "<f8"), ("('b', 'e')", "<f8"),
+ ("('c', 'f')", "<f8")]))
+ ])
+ def test_to_records_dtype_mi(self, df, kwargs, expected):
+ # see gh-18146
+ result = df.to_records(**kwargs)
+ tm.assert_almost_equal(result, expected)
+
+ def test_to_records_dict_like(self):
+ # see gh-18146
+ class DictLike(object):
+ def __init__(self, **kwargs):
+ self.d = kwargs.copy()
+
+ def __getitem__(self, key):
+ return self.d.__getitem__(key)
+
+ def __contains__(self, key):
+ return key in self.d
+
+ def keys(self):
+ return self.d.keys()
+
+ df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
+
+ dtype_mappings = dict(column_dtypes=DictLike(**{"A": np.int8,
+ "B": np.float32}),
+ index_dtypes="<U2")
+
+ result = df.to_records(**dtype_mappings)
+ expected = np.rec.array([("0", "1", "0.2", "a"),
+ ("1", "2", "1.5", "bc")],
+ dtype=[("index", "<U2"), ("A", "i1"),
+ ("B", "<f4"), ("C", "O")])
+ tm.assert_almost_equal(result, expected)
+
@pytest.mark.parametrize('mapping', [
dict,
collections.defaultdict(list),
| This option changes DataFrame.to_records() dtype for string arrays
to 'Sx', where x is the length of the longest string, instead of 'O"
- [x] closes #18146
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22229 | 2018-08-07T03:49:30Z | 2018-12-30T22:40:39Z | 2018-12-30T22:40:39Z | 2018-12-30T22:40:50Z |
Fix test_info_repr #21746 | diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 76bdf141ec828..c19f8e57f9ae7 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -1586,8 +1586,12 @@ def test_repr_html_long_and_wide(self):
assert '...' in df._repr_html_()
def test_info_repr(self):
+ # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
+ # the terminal size to ensure that we try to print something "too big"
+ term_width, term_height = get_terminal_size()
+
max_rows = 60
- max_cols = 20
+ max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
| Fixes test that fails locally in py27 when run in a terminal window wider than 80 characters.
- [x] closes #21746
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/22223 | 2018-08-06T20:02:24Z | 2018-08-07T13:12:13Z | 2018-08-07T13:12:13Z | 2018-08-08T15:51:22Z |
[CLN] Merge inference into lib.pyx | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index d80b5fd2bd0b9..89a96342a414b 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1,21 +1,12 @@
# -*- coding: utf-8 -*-
-# cython: profile=False
+from decimal import Decimal
+import sys
+
cimport cython
from cython cimport Py_ssize_t
-import numpy as np
-cimport numpy as cnp
-from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
- PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew,
- flatiter, NPY_OBJECT,
- int64_t,
- float32_t, float64_t,
- uint8_t, uint64_t,
- complex128_t)
-cnp.import_array()
-
from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
- PyList_Check, PyFloat_Check,
+ PyList_Check,
PyString_Check,
PyBytes_Check,
PyUnicode_Check,
@@ -28,13 +19,63 @@ from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyDateTime_IMPORT)
PyDateTime_IMPORT
-from tslib import NaT, array_to_datetime
-from missing cimport checknull
+import numpy as np
+cimport numpy as cnp
+from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
+ PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew,
+ flatiter, NPY_OBJECT,
+ int64_t,
+ float32_t, float64_t,
+ uint8_t, uint64_t,
+ complex128_t)
+cnp.import_array()
+
+cdef extern from "numpy/arrayobject.h":
+ # cython's numpy.dtype specification is incorrect, which leads to
+ # errors in issubclass(self.dtype.type, np.bool_), so we directly
+ # include the correct version
+ # https://github.com/cython/cython/issues/2022
+
+ ctypedef class numpy.dtype [object PyArray_Descr]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined. Please
+ # ask on cython-dev if you need more.
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef char byteorder
+ cdef object fields
+ cdef tuple names
+
+
+cdef extern from "src/parse_helper.h":
+ int floatify(object, double *result, int *maybe_int) except -1
cimport util
+from util cimport (is_nan,
+ UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN)
+
+from tslib import array_to_datetime
+from tslibs.nattype import NaT
+from tslibs.conversion cimport convert_to_tsobject
+from tslibs.timedeltas cimport convert_to_timedelta64
+from tslibs.timezones cimport get_timezone, tz_compare
+
+from missing cimport (checknull,
+ is_null_datetime64, is_null_timedelta64, is_null_period)
+
+
+# constants that will be compared to potentially arbitrarily large
+# python int
+cdef object oINT64_MAX = <int64_t>INT64_MAX
+cdef object oINT64_MIN = <int64_t>INT64_MIN
+cdef object oUINT64_MAX = <uint64_t>UINT64_MAX
+
cdef int64_t NPY_NAT = util.get_nat()
-from util cimport is_array, is_nan
+iNaT = util.get_nat()
+
+cdef bint PY2 = sys.version_info[0] == 2
+cdef double nan = <double>np.NaN
def values_from_object(object o):
@@ -57,7 +98,7 @@ def memory_usage_of_objects(ndarray[object, ndim=1] arr):
cdef int64_t s = 0
n = len(arr)
- for i from 0 <= i < n:
+ for i in range(n):
s += arr[i].__sizeof__()
return s
@@ -138,10 +179,10 @@ def fast_unique_multiple(list arrays):
dict table = {}
object val, stub = 0
- for i from 0 <= i < k:
+ for i in range(k):
buf = arrays[i]
n = len(buf)
- for j from 0 <= j < n:
+ for j in range(n):
val = buf[j]
if val not in table:
table[val] = stub
@@ -165,10 +206,10 @@ def fast_unique_multiple_list(list lists, bint sort=True):
dict table = {}
object val, stub = 0
- for i from 0 <= i < k:
+ for i in range(k):
buf = lists[i]
n = len(buf)
- for j from 0 <= j < n:
+ for j in range(n):
val = buf[j]
if val not in table:
table[val] = stub
@@ -208,7 +249,7 @@ def fast_unique_multiple_list_gen(object gen, bint sort=True):
for buf in gen:
n = len(buf)
- for j from 0 <= j < n:
+ for j in range(n):
val = buf[j]
if val not in table:
table[val] = stub
@@ -595,7 +636,7 @@ def row_bool_subset(ndarray[float64_t, ndim=2] values,
ndarray[float64_t, ndim=2] out
n, k = (<object> values).shape
- assert(n == len(mask))
+ assert (n == len(mask))
out = np.empty((mask.sum(), k), dtype=np.float64)
@@ -617,7 +658,7 @@ def row_bool_subset_object(ndarray[object, ndim=2] values,
ndarray[object, ndim=2] out
n, k = (<object> values).shape
- assert(n == len(mask))
+ assert (n == len(mask))
out = np.empty((mask.sum(), k), dtype=object)
@@ -663,21 +704,21 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
Py_ssize_t i, j, k, n
ndarray[int64_t, ndim=2] counts
- assert(axis == 0 or axis == 1)
+ assert (axis == 0 or axis == 1)
n, k = (<object> mask).shape
if axis == 0:
counts = np.zeros((max_bin, k), dtype='i8')
with nogil:
- for i from 0 <= i < n:
- for j from 0 <= j < k:
+ for i in range(n):
+ for j in range(k):
counts[labels[i], j] += mask[i, j]
else: # axis == 1
counts = np.zeros((n, max_bin), dtype='i8')
with nogil:
- for i from 0 <= i < n:
- for j from 0 <= j < k:
+ for i in range(n):
+ for j in range(k):
counts[i, labels[j]] += mask[i, j]
return counts
@@ -753,4 +794,1470 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys,
return result
-include "src/inference.pyx"
+# core.common import for fast inference checks
+
+cpdef bint is_float(object obj):
+ return util.is_float_object(obj)
+
+
+cpdef bint is_integer(object obj):
+ return util.is_integer_object(obj)
+
+
+cpdef bint is_bool(object obj):
+ return util.is_bool_object(obj)
+
+
+cpdef bint is_complex(object obj):
+ return util.is_complex_object(obj)
+
+
+cpdef bint is_decimal(object obj):
+ return isinstance(obj, Decimal)
+
+
+cpdef bint is_interval(object obj):
+ return getattr(obj, '_typ', '_typ') == 'interval'
+
+
+cpdef bint is_period(object val):
+ """ Return a boolean if this is a Period object """
+ return util.is_period_object(val)
+
+
+cdef inline bint is_offset(object val):
+ return getattr(val, '_typ', '_typ') == 'dateoffset'
+
+
+_TYPE_MAP = {
+ 'categorical': 'categorical',
+ 'category': 'categorical',
+ 'int8': 'integer',
+ 'int16': 'integer',
+ 'int32': 'integer',
+ 'int64': 'integer',
+ 'i': 'integer',
+ 'uint8': 'integer',
+ 'uint16': 'integer',
+ 'uint32': 'integer',
+ 'uint64': 'integer',
+ 'u': 'integer',
+ 'float32': 'floating',
+ 'float64': 'floating',
+ 'f': 'floating',
+ 'complex128': 'complex',
+ 'c': 'complex',
+ 'string': 'string' if PY2 else 'bytes',
+ 'S': 'string' if PY2 else 'bytes',
+ 'unicode': 'unicode' if PY2 else 'string',
+ 'U': 'unicode' if PY2 else 'string',
+ 'bool': 'boolean',
+ 'b': 'boolean',
+ 'datetime64[ns]': 'datetime64',
+ 'M': 'datetime64',
+ 'timedelta64[ns]': 'timedelta64',
+ 'm': 'timedelta64',
+}
+
+# types only exist on certain platform
+try:
+ np.float128
+ _TYPE_MAP['float128'] = 'floating'
+except AttributeError:
+ pass
+try:
+ np.complex256
+ _TYPE_MAP['complex256'] = 'complex'
+except AttributeError:
+ pass
+try:
+ np.float16
+ _TYPE_MAP['float16'] = 'floating'
+except AttributeError:
+ pass
+
+
+cdef class Seen(object):
+ """
+ Class for keeping track of the types of elements
+ encountered when trying to perform type conversions.
+ """
+
+ cdef:
+ bint int_ # seen_int
+ bint bool_ # seen_bool
+ bint null_ # seen_null
+ bint uint_ # seen_uint (unsigned integer)
+ bint sint_ # seen_sint (signed integer)
+ bint float_ # seen_float
+ bint object_ # seen_object
+ bint complex_ # seen_complex
+ bint datetime_ # seen_datetime
+ bint coerce_numeric # coerce data to numeric
+ bint timedelta_ # seen_timedelta
+ bint datetimetz_ # seen_datetimetz
+
+ def __cinit__(self, bint coerce_numeric=0):
+ """
+ Initialize a Seen instance.
+
+ Parameters
+ ----------
+ coerce_numeric : bint, default 0
+ Whether or not to force conversion to a numeric data type if
+ initial methods to convert to numeric fail.
+ """
+ self.int_ = 0
+ self.bool_ = 0
+ self.null_ = 0
+ self.uint_ = 0
+ self.sint_ = 0
+ self.float_ = 0
+ self.object_ = 0
+ self.complex_ = 0
+ self.datetime_ = 0
+ self.timedelta_ = 0
+ self.datetimetz_ = 0
+ self.coerce_numeric = coerce_numeric
+
+ cdef inline bint check_uint64_conflict(self) except -1:
+ """
+ Check whether we can safely convert a uint64 array to a numeric dtype.
+
+ There are two cases when conversion to numeric dtype with a uint64
+ array is not safe (and will therefore not be performed)
+
+ 1) A NaN element is encountered.
+
+ uint64 cannot be safely cast to float64 due to truncation issues
+ at the extreme ends of the range.
+
+ 2) A negative number is encountered.
+
+ There is no numerical dtype that can hold both negative numbers
+ and numbers greater than INT64_MAX. Hence, at least one number
+ will be improperly cast if we convert to a numeric dtype.
+
+ Returns
+ -------
+ return_values : bool
+ Whether or not we should return the original input array to avoid
+ data truncation.
+
+ Raises
+ ------
+ ValueError : uint64 elements were detected, and at least one of the
+ two conflict cases was also detected. However, we are
+ trying to force conversion to a numeric dtype.
+ """
+ return (self.uint_ and (self.null_ or self.sint_)
+ and not self.coerce_numeric)
+
+ cdef inline saw_null(self):
+ """
+ Set flags indicating that a null value was encountered.
+ """
+ self.null_ = 1
+ self.float_ = 1
+
+ cdef saw_int(self, object val):
+ """
+ Set flags indicating that an integer value was encountered.
+
+ In addition to setting a flag that an integer was seen, we
+ also set two flags depending on the type of integer seen:
+
+ 1) sint_ : a negative (signed) number in the
+ range of [-2**63, 0) was encountered
+ 2) uint_ : a positive number in the range of
+ [2**63, 2**64) was encountered
+
+ Parameters
+ ----------
+ val : Python int
+ Value with which to set the flags.
+ """
+ self.int_ = 1
+ self.sint_ = self.sint_ or (oINT64_MIN <= val < 0)
+ self.uint_ = self.uint_ or (oINT64_MAX < val <= oUINT64_MAX)
+
+ @property
+ def numeric_(self):
+ return self.complex_ or self.float_ or self.int_
+
+ @property
+ def is_bool(self):
+ return not (self.datetime_ or self.numeric_ or self.timedelta_)
+
+ @property
+ def is_float_or_complex(self):
+ return not (self.bool_ or self.datetime_ or self.timedelta_)
+
+
+cdef _try_infer_map(v):
+ """ if its in our map, just return the dtype """
+ cdef:
+ object attr, val
+ for attr in ['name', 'kind', 'base']:
+ val = getattr(v.dtype, attr)
+ if val in _TYPE_MAP:
+ return _TYPE_MAP[val]
+ return None
+
+
+def infer_dtype(object value, bint skipna=False):
+ """
+ Efficiently infer the type of a passed val, or list-like
+ array of values. Return a string describing the type.
+
+ Parameters
+ ----------
+ value : scalar, list, ndarray, or pandas type
+ skipna : bool, default False
+ Ignore NaN values when inferring the type. The default of ``False``
+ will be deprecated in a later version of pandas.
+
+ .. versionadded:: 0.21.0
+
+ Returns
+ -------
+ string describing the common type of the input data.
+ Results can include:
+
+ - string
+ - unicode
+ - bytes
+ - floating
+ - integer
+ - mixed-integer
+ - mixed-integer-float
+ - decimal
+ - complex
+ - categorical
+ - boolean
+ - datetime64
+ - datetime
+ - date
+ - timedelta64
+ - timedelta
+ - time
+ - period
+ - mixed
+
+ Raises
+ ------
+ TypeError if ndarray-like but cannot infer the dtype
+
+ Notes
+ -----
+ - 'mixed' is the catchall for anything that is not otherwise
+ specialized
+ - 'mixed-integer-float' are floats and integers
+ - 'mixed-integer' are integers mixed with non-integers
+
+ Examples
+ --------
+ >>> infer_dtype(['foo', 'bar'])
+ 'string'
+
+ >>> infer_dtype(['a', np.nan, 'b'], skipna=True)
+ 'string'
+
+ >>> infer_dtype(['a', np.nan, 'b'], skipna=False)
+ 'mixed'
+
+ >>> infer_dtype([b'foo', b'bar'])
+ 'bytes'
+
+ >>> infer_dtype([1, 2, 3])
+ 'integer'
+
+ >>> infer_dtype([1, 2, 3.5])
+ 'mixed-integer-float'
+
+ >>> infer_dtype([1.0, 2.0, 3.5])
+ 'floating'
+
+ >>> infer_dtype(['a', 1])
+ 'mixed-integer'
+
+ >>> infer_dtype([Decimal(1), Decimal(2.0)])
+ 'decimal'
+
+ >>> infer_dtype([True, False])
+ 'boolean'
+
+ >>> infer_dtype([True, False, np.nan])
+ 'mixed'
+
+ >>> infer_dtype([pd.Timestamp('20130101')])
+ 'datetime'
+
+ >>> infer_dtype([datetime.date(2013, 1, 1)])
+ 'date'
+
+ >>> infer_dtype([np.datetime64('2013-01-01')])
+ 'datetime64'
+
+ >>> infer_dtype([datetime.timedelta(0, 1, 1)])
+ 'timedelta'
+
+ >>> infer_dtype(pd.Series(list('aabc')).astype('category'))
+ 'categorical'
+ """
+ cdef:
+ Py_ssize_t i, n
+ object val
+ ndarray values
+ bint seen_pdnat = False
+ bint seen_val = False
+
+ if util.is_array(value):
+ values = value
+ elif hasattr(value, 'dtype'):
+ # this will handle ndarray-like
+ # e.g. categoricals
+ try:
+ values = getattr(value, '_values', getattr(value, 'values', value))
+ except:
+ value = _try_infer_map(value)
+ if value is not None:
+ return value
+
+ # its ndarray like but we can't handle
+ raise ValueError("cannot infer type for {typ}"
+ .format(typ=type(value)))
+
+ else:
+ if not PyList_Check(value):
+ value = list(value)
+ from pandas.core.dtypes.cast import (
+ construct_1d_object_array_from_listlike)
+ values = construct_1d_object_array_from_listlike(value)
+
+ values = getattr(values, 'values', values)
+ val = _try_infer_map(values)
+ if val is not None:
+ return val
+
+ if values.dtype != np.object_:
+ values = values.astype('O')
+
+ # make contiguous
+ values = values.ravel()
+
+ n = len(values)
+ if n == 0:
+ return 'empty'
+
+ # try to use a valid value
+ for i in range(n):
+ val = util.get_value_1d(values, i)
+
+ # do not use is_nul_datetimelike to keep
+ # np.datetime64('nat') and np.timedelta64('nat')
+ if val is None or util.is_nan(val):
+ pass
+ elif val is NaT:
+ seen_pdnat = True
+ else:
+ seen_val = True
+ break
+
+ # if all values are nan/NaT
+ if seen_val is False and seen_pdnat is True:
+ return 'datetime'
+ # float/object nan is handled in latter logic
+
+ if util.is_datetime64_object(val):
+ if is_datetime64_array(values):
+ return 'datetime64'
+ elif is_timedelta_or_timedelta64_array(values):
+ return 'timedelta'
+
+ elif is_timedelta(val):
+ if is_timedelta_or_timedelta64_array(values):
+ return 'timedelta'
+
+ elif util.is_integer_object(val):
+ # a timedelta will show true here as well
+ if is_timedelta(val):
+ if is_timedelta_or_timedelta64_array(values):
+ return 'timedelta'
+
+ if is_integer_array(values):
+ return 'integer'
+ elif is_integer_float_array(values):
+ return 'mixed-integer-float'
+ elif is_timedelta_or_timedelta64_array(values):
+ return 'timedelta'
+ return 'mixed-integer'
+
+ elif PyDateTime_Check(val):
+ if is_datetime_array(values):
+ return 'datetime'
+
+ elif PyDate_Check(val):
+ if is_date_array(values, skipna=skipna):
+ return 'date'
+
+ elif PyTime_Check(val):
+ if is_time_array(values, skipna=skipna):
+ return 'time'
+
+ elif is_decimal(val):
+ return 'decimal'
+
+ elif util.is_float_object(val):
+ if is_float_array(values):
+ return 'floating'
+ elif is_integer_float_array(values):
+ return 'mixed-integer-float'
+
+ elif util.is_bool_object(val):
+ if is_bool_array(values, skipna=skipna):
+ return 'boolean'
+
+ elif PyString_Check(val):
+ if is_string_array(values, skipna=skipna):
+ return 'string'
+
+ elif PyUnicode_Check(val):
+ if is_unicode_array(values, skipna=skipna):
+ return 'unicode'
+
+ elif PyBytes_Check(val):
+ if is_bytes_array(values, skipna=skipna):
+ return 'bytes'
+
+ elif is_period(val):
+ if is_period_array(values):
+ return 'period'
+
+ elif is_interval(val):
+ if is_interval_array(values):
+ return 'interval'
+
+ for i in range(n):
+ val = util.get_value_1d(values, i)
+ if (util.is_integer_object(val) and
+ not util.is_timedelta64_object(val) and
+ not util.is_datetime64_object(val)):
+ return 'mixed-integer'
+
+ return 'mixed'
+
+
+cpdef object infer_datetimelike_array(object arr):
+ """
+ infer if we have a datetime or timedelta array
+ - date: we have *only* date and maybe strings, nulls
+ - datetime: we have *only* datetimes and maybe strings, nulls
+ - timedelta: we have *only* timedeltas and maybe strings, nulls
+ - nat: we do not have *any* date, datetimes or timedeltas, but do have
+ at least a NaT
+ - mixed: other objects (strings, a mix of tz-aware and tz-naive, or
+ actual objects)
+
+ Parameters
+ ----------
+ arr : object array
+
+ Returns
+ -------
+ string: {datetime, timedelta, date, nat, mixed}
+ """
+
+ cdef:
+ Py_ssize_t i, n = len(arr)
+ bint seen_timedelta = 0, seen_date = 0, seen_datetime = 0
+ bint seen_tz_aware = 0, seen_tz_naive = 0
+ bint seen_nat = 0
+ list objs = []
+ object v
+
+ for i in range(n):
+ v = arr[i]
+ if util.is_string_object(v):
+ objs.append(v)
+
+ if len(objs) == 3:
+ break
+
+ elif v is None or util.is_nan(v):
+ # nan or None
+ pass
+ elif v is NaT:
+ seen_nat = 1
+ elif PyDateTime_Check(v):
+ # datetime
+ seen_datetime = 1
+
+ # disambiguate between tz-naive and tz-aware
+ if v.tzinfo is None:
+ seen_tz_naive = 1
+ else:
+ seen_tz_aware = 1
+
+ if seen_tz_naive and seen_tz_aware:
+ return 'mixed'
+ elif util.is_datetime64_object(v):
+ # np.datetime64
+ seen_datetime = 1
+ elif PyDate_Check(v):
+ seen_date = 1
+ elif is_timedelta(v) or util.is_timedelta64_object(v):
+ # timedelta, or timedelta64
+ seen_timedelta = 1
+ else:
+ return 'mixed'
+
+ if seen_date and not (seen_datetime or seen_timedelta):
+ return 'date'
+ elif seen_datetime and not seen_timedelta:
+ return 'datetime'
+ elif seen_timedelta and not seen_datetime:
+ return 'timedelta'
+ elif seen_nat:
+ return 'nat'
+
+ # short-circuit by trying to
+ # actually convert these strings
+ # this is for performance as we don't need to try
+ # convert *every* string array
+ if len(objs):
+ try:
+ array_to_datetime(objs, errors='raise')
+ return 'datetime'
+ except:
+ pass
+
+ # we are *not* going to infer from strings
+ # for timedelta as too much ambiguity
+
+ return 'mixed'
+
+
+cdef inline bint is_timedelta(object o):
+ return PyDelta_Check(o) or util.is_timedelta64_object(o)
+
+
+cdef class Validator:
+
+ cdef:
+ Py_ssize_t n
+ dtype dtype
+ bint skipna
+
+ def __cinit__(self, Py_ssize_t n, dtype dtype=np.dtype(np.object_),
+ bint skipna=False):
+ self.n = n
+ self.dtype = dtype
+ self.skipna = skipna
+
+ cdef bint validate(self, ndarray values) except -1:
+ if not self.n:
+ return False
+
+ if self.is_array_typed():
+ return True
+ elif self.dtype.type_num == NPY_OBJECT:
+ if self.skipna:
+ return self._validate_skipna(values)
+ else:
+ return self._validate(values)
+ else:
+ return False
+
+ @cython.wraparound(False)
+ @cython.boundscheck(False)
+ cdef bint _validate(self, ndarray values) except -1:
+ cdef:
+ Py_ssize_t i
+ Py_ssize_t n = self.n
+
+ for i in range(n):
+ if not self.is_valid(values[i]):
+ return False
+
+ return self.finalize_validate()
+
+ @cython.wraparound(False)
+ @cython.boundscheck(False)
+ cdef bint _validate_skipna(self, ndarray values) except -1:
+ cdef:
+ Py_ssize_t i
+ Py_ssize_t n = self.n
+
+ for i in range(n):
+ if not self.is_valid_skipna(values[i]):
+ return False
+
+ return self.finalize_validate_skipna()
+
+ cdef bint is_valid(self, object value) except -1:
+ return self.is_value_typed(value)
+
+ cdef bint is_valid_skipna(self, object value) except -1:
+ return self.is_valid(value) or self.is_valid_null(value)
+
+ cdef bint is_value_typed(self, object value) except -1:
+ raise NotImplementedError(
+ '{typ} child class must define is_value_typed'
+ .format(typ=type(self).__name__))
+
+ cdef bint is_valid_null(self, object value) except -1:
+ return value is None or util.is_nan(value)
+
+ cdef bint is_array_typed(self) except -1:
+ return False
+
+ cdef inline bint finalize_validate(self):
+ return True
+
+ cdef bint finalize_validate_skipna(self):
+ # TODO(phillipc): Remove the existing validate methods and replace them
+ # with the skipna versions upon full deprecation of skipna=False
+ return True
+
+
+cdef class BoolValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return util.is_bool_object(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.bool_)
+
+
+cpdef bint is_bool_array(ndarray values, bint skipna=False):
+ cdef:
+ BoolValidator validator = BoolValidator(len(values),
+ values.dtype,
+ skipna=skipna)
+ return validator.validate(values)
+
+
+cdef class IntegerValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return util.is_integer_object(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.integer)
+
+
+cpdef bint is_integer_array(ndarray values):
+ cdef:
+ IntegerValidator validator = IntegerValidator(len(values),
+ values.dtype)
+ return validator.validate(values)
+
+
+cdef class IntegerFloatValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return util.is_integer_object(value) or util.is_float_object(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.integer)
+
+
+cdef bint is_integer_float_array(ndarray values):
+ cdef:
+ IntegerFloatValidator validator = IntegerFloatValidator(len(values),
+ values.dtype)
+ return validator.validate(values)
+
+
+cdef class FloatValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return util.is_float_object(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.floating)
+
+
+cpdef bint is_float_array(ndarray values):
+ cdef:
+ FloatValidator validator = FloatValidator(len(values), values.dtype)
+ return validator.validate(values)
+
+
+cdef class StringValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return PyString_Check(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.str_)
+
+
+cpdef bint is_string_array(ndarray values, bint skipna=False):
+ cdef:
+ StringValidator validator = StringValidator(len(values),
+ values.dtype,
+ skipna=skipna)
+ return validator.validate(values)
+
+
+cdef class UnicodeValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return PyUnicode_Check(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.unicode_)
+
+
+cdef bint is_unicode_array(ndarray values, bint skipna=False):
+ cdef:
+ UnicodeValidator validator = UnicodeValidator(len(values),
+ values.dtype,
+ skipna=skipna)
+ return validator.validate(values)
+
+
+cdef class BytesValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return PyBytes_Check(value)
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.bytes_)
+
+
+cdef bint is_bytes_array(ndarray values, bint skipna=False):
+ cdef:
+ BytesValidator validator = BytesValidator(len(values), values.dtype,
+ skipna=skipna)
+ return validator.validate(values)
+
+
+cdef class TemporalValidator(Validator):
+ cdef:
+ Py_ssize_t generic_null_count
+
+ def __cinit__(self, Py_ssize_t n, dtype dtype=np.dtype(np.object_),
+ bint skipna=False):
+ self.n = n
+ self.dtype = dtype
+ self.skipna = skipna
+ self.generic_null_count = 0
+
+ cdef inline bint is_valid(self, object value) except -1:
+ return self.is_value_typed(value) or self.is_valid_null(value)
+
+ cdef bint is_valid_null(self, object value) except -1:
+ raise NotImplementedError(
+ '{typ} child class must define is_valid_null'
+ .format(typ=type(self).__name__))
+
+ cdef inline bint is_valid_skipna(self, object value) except -1:
+ cdef:
+ bint is_typed_null = self.is_valid_null(value)
+ bint is_generic_null = value is None or util.is_nan(value)
+ self.generic_null_count += is_typed_null and is_generic_null
+ return self.is_value_typed(value) or is_typed_null or is_generic_null
+
+ cdef inline bint finalize_validate_skipna(self):
+ return self.generic_null_count != self.n
+
+
+cdef class DatetimeValidator(TemporalValidator):
+ cdef bint is_value_typed(self, object value) except -1:
+ return PyDateTime_Check(value)
+
+ cdef inline bint is_valid_null(self, object value) except -1:
+ return is_null_datetime64(value)
+
+
+cpdef bint is_datetime_array(ndarray values):
+ cdef:
+ DatetimeValidator validator = DatetimeValidator(len(values),
+ skipna=True)
+ return validator.validate(values)
+
+
+cdef class Datetime64Validator(DatetimeValidator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return util.is_datetime64_object(value)
+
+
+cpdef bint is_datetime64_array(ndarray values):
+ cdef:
+ Datetime64Validator validator = Datetime64Validator(len(values),
+ skipna=True)
+ return validator.validate(values)
+
+
+cpdef bint is_datetime_with_singletz_array(ndarray values):
+ """
+ Check values have the same tzinfo attribute.
+ Doesn't check values are datetime-like types.
+ """
+ cdef:
+ Py_ssize_t i, j, n = len(values)
+ object base_val, base_tz, val, tz
+
+ if n == 0:
+ return False
+
+ for i in range(n):
+ base_val = values[i]
+ if base_val is not NaT:
+ base_tz = get_timezone(getattr(base_val, 'tzinfo', None))
+
+ for j in range(i, n):
+ val = values[j]
+ if val is not NaT:
+ tz = getattr(val, 'tzinfo', None)
+ if not tz_compare(base_tz, tz):
+ return False
+ break
+
+ return True
+
+
+cdef class TimedeltaValidator(TemporalValidator):
+ cdef bint is_value_typed(self, object value) except -1:
+ return PyDelta_Check(value)
+
+ cdef inline bint is_valid_null(self, object value) except -1:
+ return is_null_timedelta64(value)
+
+
+cpdef bint is_timedelta_array(ndarray values):
+ cdef:
+ TimedeltaValidator validator = TimedeltaValidator(len(values),
+ skipna=True)
+ return validator.validate(values)
+
+
+cdef class Timedelta64Validator(TimedeltaValidator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return util.is_timedelta64_object(value)
+
+
+cpdef bint is_timedelta64_array(ndarray values):
+ cdef:
+ Timedelta64Validator validator = Timedelta64Validator(len(values),
+ skipna=True)
+ return validator.validate(values)
+
+
+cdef class AnyTimedeltaValidator(TimedeltaValidator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return is_timedelta(value)
+
+
+cpdef bint is_timedelta_or_timedelta64_array(ndarray values):
+ """ infer with timedeltas and/or nat/none """
+ cdef:
+ AnyTimedeltaValidator validator = AnyTimedeltaValidator(len(values),
+ skipna=True)
+ return validator.validate(values)
+
+
+cdef class DateValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return PyDate_Check(value)
+
+
+cpdef bint is_date_array(ndarray values, bint skipna=False):
+ cdef:
+ DateValidator validator = DateValidator(len(values), skipna=skipna)
+ return validator.validate(values)
+
+
+cdef class TimeValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return PyTime_Check(value)
+
+
+cpdef bint is_time_array(ndarray values, bint skipna=False):
+ cdef:
+ TimeValidator validator = TimeValidator(len(values), skipna=skipna)
+ return validator.validate(values)
+
+
+cdef class PeriodValidator(TemporalValidator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return is_period(value)
+
+ cdef inline bint is_valid_null(self, object value) except -1:
+ return is_null_period(value)
+
+
+cpdef bint is_period_array(ndarray values):
+ cdef:
+ PeriodValidator validator = PeriodValidator(len(values), skipna=True)
+ return validator.validate(values)
+
+
+cdef class IntervalValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return is_interval(value)
+
+
+cpdef bint is_interval_array(ndarray values):
+ cdef:
+ IntervalValidator validator = IntervalValidator(len(values),
+ skipna=True)
+ return validator.validate(values)
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def maybe_convert_numeric(ndarray[object] values, set na_values,
+ bint convert_empty=True, bint coerce_numeric=False):
+ """
+ Convert object array to a numeric array if possible.
+
+ Parameters
+ ----------
+ values : ndarray
+ Array of object elements to convert.
+ na_values : set
+ Set of values that should be interpreted as NaN.
+ convert_empty : bool, default True
+ If an empty array-like object is encountered, whether to interpret
+ that element as NaN or not. If set to False, a ValueError will be
+ raised if such an element is encountered and 'coerce_numeric' is False.
+ coerce_numeric : bool, default False
+ If initial attempts to convert to numeric have failed, whether to
+ force conversion to numeric via alternative methods or by setting the
+ element to NaN. Otherwise, an Exception will be raised when such an
+ element is encountered.
+
+ This boolean also has an impact on how conversion behaves when a
+ numeric array has no suitable numerical dtype to return (i.e. uint64,
+ int32, uint8). If set to False, the original object array will be
+ returned. Otherwise, a ValueError will be raised.
+
+ Returns
+ -------
+ numeric_array : array of converted object values to numerical ones
+ """
+
+ if len(values) == 0:
+ return np.array([], dtype='i8')
+
+ # fastpath for ints - try to convert all based on first value
+ cdef:
+ object val = values[0]
+
+ if util.is_integer_object(val):
+ try:
+ maybe_ints = values.astype('i8')
+ if (maybe_ints == values).all():
+ return maybe_ints
+ except (ValueError, OverflowError, TypeError):
+ pass
+
+ # otherwise, iterate and do full infererence
+ cdef:
+ int status, maybe_int
+ Py_ssize_t i, n = values.size
+ Seen seen = Seen(coerce_numeric)
+ ndarray[float64_t] floats = np.empty(n, dtype='f8')
+ ndarray[complex128_t] complexes = np.empty(n, dtype='c16')
+ ndarray[int64_t] ints = np.empty(n, dtype='i8')
+ ndarray[uint64_t] uints = np.empty(n, dtype='u8')
+ ndarray[uint8_t] bools = np.empty(n, dtype='u1')
+ float64_t fval
+
+ for i in range(n):
+ val = values[i]
+
+ if val.__hash__ is not None and val in na_values:
+ seen.saw_null()
+ floats[i] = complexes[i] = nan
+ elif util.is_float_object(val):
+ fval = val
+ if fval != fval:
+ seen.null_ = True
+
+ floats[i] = complexes[i] = fval
+ seen.float_ = True
+ elif util.is_integer_object(val):
+ floats[i] = complexes[i] = val
+
+ val = int(val)
+ seen.saw_int(val)
+
+ if val >= 0:
+ if val <= oUINT64_MAX:
+ uints[i] = val
+ else:
+ seen.float_ = True
+
+ if val <= oINT64_MAX:
+ ints[i] = val
+
+ if seen.sint_ and seen.uint_:
+ seen.float_ = True
+
+ elif util.is_bool_object(val):
+ floats[i] = uints[i] = ints[i] = bools[i] = val
+ seen.bool_ = True
+ elif val is None:
+ seen.saw_null()
+ floats[i] = complexes[i] = nan
+ elif hasattr(val, '__len__') and len(val) == 0:
+ if convert_empty or seen.coerce_numeric:
+ seen.saw_null()
+ floats[i] = complexes[i] = nan
+ else:
+ raise ValueError('Empty string encountered')
+ elif util.is_complex_object(val):
+ complexes[i] = val
+ seen.complex_ = True
+ elif is_decimal(val):
+ floats[i] = complexes[i] = val
+ seen.float_ = True
+ else:
+ try:
+ status = floatify(val, &fval, &maybe_int)
+
+ if fval in na_values:
+ seen.saw_null()
+ floats[i] = complexes[i] = nan
+ else:
+ if fval != fval:
+ seen.null_ = True
+
+ floats[i] = fval
+
+ if maybe_int:
+ as_int = int(val)
+
+ if as_int in na_values:
+ seen.saw_null()
+ else:
+ seen.saw_int(as_int)
+
+ if not (seen.float_ or as_int in na_values):
+ if as_int < oINT64_MIN or as_int > oUINT64_MAX:
+ raise ValueError('Integer out of range.')
+
+ if as_int >= 0:
+ uints[i] = as_int
+ if as_int <= oINT64_MAX:
+ ints[i] = as_int
+
+ seen.float_ = seen.float_ or (seen.uint_ and seen.sint_)
+ else:
+ seen.float_ = True
+ except (TypeError, ValueError) as e:
+ if not seen.coerce_numeric:
+ raise type(e)(str(e) + ' at position {pos}'.format(pos=i))
+ elif "uint64" in str(e): # Exception from check functions.
+ raise
+ seen.saw_null()
+ floats[i] = nan
+
+ if seen.check_uint64_conflict():
+ return values
+
+ if seen.complex_:
+ return complexes
+ elif seen.float_:
+ return floats
+ elif seen.int_:
+ if seen.uint_:
+ return uints
+ else:
+ return ints
+ elif seen.bool_:
+ return bools.view(np.bool_)
+ elif seen.uint_:
+ return uints
+ return ints
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
+ bint safe=0, bint convert_datetime=0,
+ bint convert_timedelta=0):
+ """
+ Type inference function-- convert object array to proper dtype
+ """
+ cdef:
+ Py_ssize_t i, n
+ ndarray[float64_t] floats
+ ndarray[complex128_t] complexes
+ ndarray[int64_t] ints
+ ndarray[uint64_t] uints
+ ndarray[uint8_t] bools
+ ndarray[int64_t] idatetimes
+ ndarray[int64_t] itimedeltas
+ Seen seen = Seen()
+ object val
+ float64_t fval, fnan
+
+ n = len(objects)
+
+ floats = np.empty(n, dtype='f8')
+ complexes = np.empty(n, dtype='c16')
+ ints = np.empty(n, dtype='i8')
+ uints = np.empty(n, dtype='u8')
+ bools = np.empty(n, dtype=np.uint8)
+
+ if convert_datetime:
+ datetimes = np.empty(n, dtype='M8[ns]')
+ idatetimes = datetimes.view(np.int64)
+
+ if convert_timedelta:
+ timedeltas = np.empty(n, dtype='m8[ns]')
+ itimedeltas = timedeltas.view(np.int64)
+
+ fnan = np.nan
+
+ for i in range(n):
+ val = objects[i]
+
+ if val is None:
+ seen.null_ = 1
+ floats[i] = complexes[i] = fnan
+ elif val is NaT:
+ if convert_datetime:
+ idatetimes[i] = iNaT
+ seen.datetime_ = 1
+ if convert_timedelta:
+ itimedeltas[i] = iNaT
+ seen.timedelta_ = 1
+ if not (convert_datetime or convert_timedelta):
+ seen.object_ = 1
+ elif util.is_bool_object(val):
+ seen.bool_ = 1
+ bools[i] = val
+ elif util.is_float_object(val):
+ floats[i] = complexes[i] = val
+ seen.float_ = 1
+ elif util.is_datetime64_object(val):
+ if convert_datetime:
+ idatetimes[i] = convert_to_tsobject(
+ val, None, None, 0, 0).value
+ seen.datetime_ = 1
+ else:
+ seen.object_ = 1
+ break
+ elif is_timedelta(val):
+ if convert_timedelta:
+ itimedeltas[i] = convert_to_timedelta64(val, 'ns')
+ seen.timedelta_ = 1
+ else:
+ seen.object_ = 1
+ break
+ elif util.is_integer_object(val):
+ seen.int_ = 1
+ floats[i] = <float64_t> val
+ complexes[i] = <double complex> val
+ if not seen.null_:
+ seen.saw_int(int(val))
+
+ if ((seen.uint_ and seen.sint_) or
+ val > oUINT64_MAX or val < oINT64_MIN):
+ seen.object_ = 1
+ break
+
+ if seen.uint_:
+ uints[i] = val
+ elif seen.sint_:
+ ints[i] = val
+ else:
+ uints[i] = val
+ ints[i] = val
+
+ elif util.is_complex_object(val):
+ complexes[i] = val
+ seen.complex_ = 1
+ elif PyDateTime_Check(val) or util.is_datetime64_object(val):
+
+ # if we have an tz's attached then return the objects
+ if convert_datetime:
+ if getattr(val, 'tzinfo', None) is not None:
+ seen.datetimetz_ = 1
+ break
+ else:
+ seen.datetime_ = 1
+ idatetimes[i] = convert_to_tsobject(
+ val, None, None, 0, 0).value
+ else:
+ seen.object_ = 1
+ break
+ elif try_float and not util.is_string_object(val):
+ # this will convert Decimal objects
+ try:
+ floats[i] = float(val)
+ complexes[i] = complex(val)
+ seen.float_ = 1
+ except Exception:
+ seen.object_ = 1
+ break
+ else:
+ seen.object_ = 1
+ break
+
+ # we try to coerce datetime w/tz but must all have the same tz
+ if seen.datetimetz_:
+ if len({getattr(val, 'tzinfo', None) for val in objects}) == 1:
+ from pandas import DatetimeIndex
+ return DatetimeIndex(objects)
+ seen.object_ = 1
+
+ if not seen.object_:
+ if not safe:
+ if seen.null_:
+ if seen.is_float_or_complex:
+ if seen.complex_:
+ return complexes
+ elif seen.float_ or seen.int_:
+ return floats
+ else:
+ if not seen.bool_:
+ if seen.datetime_:
+ if not seen.numeric_:
+ return datetimes
+ elif seen.timedelta_:
+ if not seen.numeric_:
+ return timedeltas
+ else:
+ if seen.complex_:
+ return complexes
+ elif seen.float_:
+ return floats
+ elif seen.int_:
+ if seen.uint_:
+ return uints
+ else:
+ return ints
+ elif seen.is_bool:
+ return bools.view(np.bool_)
+
+ else:
+ # don't cast int to float, etc.
+ if seen.null_:
+ if seen.is_float_or_complex:
+ if seen.complex_:
+ if not seen.int_:
+ return complexes
+ elif seen.float_:
+ if not seen.int_:
+ return floats
+ else:
+ if not seen.bool_:
+ if seen.datetime_:
+ if not seen.numeric_:
+ return datetimes
+ elif seen.timedelta_:
+ if not seen.numeric_:
+ return timedeltas
+ else:
+ if seen.complex_:
+ if not seen.int_:
+ return complexes
+ elif seen.float_:
+ if not seen.int_:
+ return floats
+ elif seen.int_:
+ if seen.uint_:
+ return uints
+ else:
+ return ints
+ elif seen.is_bool:
+ return bools.view(np.bool_)
+
+ return objects
+
+
+def map_infer_mask(ndarray arr, object f, ndarray[uint8_t] mask,
+ bint convert=1):
+ """
+ Substitute for np.vectorize with pandas-friendly dtype inference
+
+ Parameters
+ ----------
+ arr : ndarray
+ f : function
+
+ Returns
+ -------
+ mapped : ndarray
+ """
+ cdef:
+ Py_ssize_t i, n
+ ndarray[object] result
+ object val
+
+ n = len(arr)
+ result = np.empty(n, dtype=object)
+ for i in range(n):
+ if mask[i]:
+ val = util.get_value_at(arr, i)
+ else:
+ val = f(util.get_value_at(arr, i))
+
+ if util.is_array(val) and PyArray_NDIM(val) == 0:
+ # unbox 0-dim arrays, GH#690
+ # TODO: is there a faster way to unbox?
+ # item_from_zerodim?
+ val = val.item()
+
+ result[i] = val
+
+ if convert:
+ return maybe_convert_objects(result,
+ try_float=0,
+ convert_datetime=0,
+ convert_timedelta=0)
+
+ return result
+
+
+def map_infer(ndarray arr, object f, bint convert=1):
+ """
+ Substitute for np.vectorize with pandas-friendly dtype inference
+
+ Parameters
+ ----------
+ arr : ndarray
+ f : function
+
+ Returns
+ -------
+ mapped : ndarray
+ """
+ cdef:
+ Py_ssize_t i, n
+ ndarray[object] result
+ object val
+
+ n = len(arr)
+ result = np.empty(n, dtype=object)
+ for i in range(n):
+ val = f(util.get_value_at(arr, i))
+
+ if util.is_array(val) and PyArray_NDIM(val) == 0:
+ # unbox 0-dim arrays, GH#690
+ # TODO: is there a faster way to unbox?
+ # item_from_zerodim?
+ val = val.item()
+
+ result[i] = val
+
+ if convert:
+ return maybe_convert_objects(result,
+ try_float=0,
+ convert_datetime=0,
+ convert_timedelta=0)
+
+ return result
+
+
+def to_object_array(list rows, int min_width=0):
+ """
+ Convert a list of lists into an object array.
+
+ Parameters
+ ----------
+ rows : 2-d array (N, K)
+ A list of lists to be converted into an array
+ min_width : int
+ The minimum width of the object array. If a list
+ in `rows` contains fewer than `width` elements,
+ the remaining elements in the corresponding row
+ will all be `NaN`.
+
+ Returns
+ -------
+ obj_array : numpy array of the object dtype
+ """
+ cdef:
+ Py_ssize_t i, j, n, k, tmp
+ ndarray[object, ndim=2] result
+ list row
+
+ n = len(rows)
+
+ k = min_width
+ for i in range(n):
+ tmp = len(rows[i])
+ if tmp > k:
+ k = tmp
+
+ result = np.empty((n, k), dtype=object)
+
+ for i in range(n):
+ row = rows[i]
+
+ for j in range(len(row)):
+ result[i, j] = row[j]
+
+ return result
+
+
+def tuples_to_object_array(ndarray[object] tuples):
+ cdef:
+ Py_ssize_t i, j, n, k, tmp
+ ndarray[object, ndim=2] result
+ tuple tup
+
+ n = len(tuples)
+ k = len(tuples[0])
+ result = np.empty((n, k), dtype=object)
+ for i in range(n):
+ tup = tuples[i]
+ for j in range(k):
+ result[i, j] = tup[j]
+
+ return result
+
+
+def to_object_array_tuples(list rows):
+ cdef:
+ Py_ssize_t i, j, n, k, tmp
+ ndarray[object, ndim=2] result
+ tuple row
+
+ n = len(rows)
+
+ k = 0
+ for i in range(n):
+ tmp = len(rows[i])
+ if tmp > k:
+ k = tmp
+
+ result = np.empty((n, k), dtype=object)
+
+ try:
+ for i in range(n):
+ row = rows[i]
+ for j in range(len(row)):
+ result[i, j] = row[j]
+ except Exception:
+ # upcast any subclasses to tuple
+ for i in range(n):
+ row = tuple(rows[i])
+ for j in range(len(row)):
+ result[i, j] = row[j]
+
+ return result
+
+
+def fast_multiget(dict mapping, ndarray keys, default=np.nan):
+ cdef:
+ Py_ssize_t i, n = len(keys)
+ object val
+ ndarray[object] output = np.empty(n, dtype='O')
+
+ if n == 0:
+ # kludge, for Series
+ return np.empty(0, dtype='f8')
+
+ keys = getattr(keys, 'values', keys)
+
+ for i in range(n):
+ val = util.get_value_1d(keys, i)
+ if val in mapping:
+ output[i] = mapping[val]
+ else:
+ output[i] = default
+
+ return maybe_convert_objects(output)
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
deleted file mode 100644
index 99846c2771ebe..0000000000000
--- a/pandas/_libs/src/inference.pyx
+++ /dev/null
@@ -1,1566 +0,0 @@
-import sys
-from decimal import Decimal
-cimport util
-cimport cython
-from tslibs.nattype import NaT
-from tslibs.conversion cimport convert_to_tsobject
-from tslibs.timedeltas cimport convert_to_timedelta64
-from tslibs.timezones cimport get_timezone, tz_compare
-
-iNaT = util.get_nat()
-
-cdef bint PY2 = sys.version_info[0] == 2
-cdef double nan = <double> np.NaN
-
-cdef extern from "numpy/arrayobject.h":
- # cython's numpy.dtype specification is incorrect, which leads to
- # errors in issubclass(self.dtype.type, np.bool_), so we directly
- # include the correct version
- # https://github.com/cython/cython/issues/2022
-
- ctypedef class numpy.dtype [object PyArray_Descr]:
- # Use PyDataType_* macros when possible, however there are no macros
- # for accessing some of the fields, so some are defined. Please
- # ask on cython-dev if you need more.
- cdef int type_num
- cdef int itemsize "elsize"
- cdef char byteorder
- cdef object fields
- cdef tuple names
-
-from missing cimport is_null_datetime64, is_null_timedelta64, is_null_period
-
-from util cimport UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN
-
-# core.common import for fast inference checks
-
-cpdef bint is_float(object obj):
- return util.is_float_object(obj)
-
-
-cpdef bint is_integer(object obj):
- return util.is_integer_object(obj)
-
-
-cpdef bint is_bool(object obj):
- return util.is_bool_object(obj)
-
-
-cpdef bint is_complex(object obj):
- return util.is_complex_object(obj)
-
-
-cpdef bint is_decimal(object obj):
- return isinstance(obj, Decimal)
-
-
-cpdef bint is_interval(object obj):
- return getattr(obj, '_typ', '_typ') == 'interval'
-
-
-cpdef bint is_period(object val):
- """ Return a boolean if this is a Period object """
- return util.is_period_object(val)
-
-cdef inline bint is_offset(object val):
- return getattr(val, '_typ', '_typ') == 'dateoffset'
-
-_TYPE_MAP = {
- 'categorical': 'categorical',
- 'category': 'categorical',
- 'int8': 'integer',
- 'int16': 'integer',
- 'int32': 'integer',
- 'int64': 'integer',
- 'i': 'integer',
- 'uint8': 'integer',
- 'uint16': 'integer',
- 'uint32': 'integer',
- 'uint64': 'integer',
- 'u': 'integer',
- 'float32': 'floating',
- 'float64': 'floating',
- 'f': 'floating',
- 'complex128': 'complex',
- 'c': 'complex',
- 'string': 'string' if PY2 else 'bytes',
- 'S': 'string' if PY2 else 'bytes',
- 'unicode': 'unicode' if PY2 else 'string',
- 'U': 'unicode' if PY2 else 'string',
- 'bool': 'boolean',
- 'b': 'boolean',
- 'datetime64[ns]': 'datetime64',
- 'M': 'datetime64',
- 'timedelta64[ns]': 'timedelta64',
- 'm': 'timedelta64',
-}
-
-# types only exist on certain platform
-try:
- np.float128
- _TYPE_MAP['float128'] = 'floating'
-except AttributeError:
- pass
-try:
- np.complex256
- _TYPE_MAP['complex256'] = 'complex'
-except AttributeError:
- pass
-try:
- np.float16
- _TYPE_MAP['float16'] = 'floating'
-except AttributeError:
- pass
-
-
-cdef class Seen(object):
- """
- Class for keeping track of the types of elements
- encountered when trying to perform type conversions.
- """
-
- cdef:
- bint int_ # seen_int
- bint bool_ # seen_bool
- bint null_ # seen_null
- bint uint_ # seen_uint (unsigned integer)
- bint sint_ # seen_sint (signed integer)
- bint float_ # seen_float
- bint object_ # seen_object
- bint complex_ # seen_complex
- bint datetime_ # seen_datetime
- bint coerce_numeric # coerce data to numeric
- bint timedelta_ # seen_timedelta
- bint datetimetz_ # seen_datetimetz
-
- def __cinit__(self, bint coerce_numeric=0):
- """
- Initialize a Seen instance.
-
- Parameters
- ----------
- coerce_numeric : bint, default 0
- Whether or not to force conversion to a numeric data type if
- initial methods to convert to numeric fail.
- """
- self.int_ = 0
- self.bool_ = 0
- self.null_ = 0
- self.uint_ = 0
- self.sint_ = 0
- self.float_ = 0
- self.object_ = 0
- self.complex_ = 0
- self.datetime_ = 0
- self.timedelta_ = 0
- self.datetimetz_ = 0
- self.coerce_numeric = coerce_numeric
-
- cdef inline bint check_uint64_conflict(self) except -1:
- """
- Check whether we can safely convert a uint64 array to a numeric dtype.
-
- There are two cases when conversion to numeric dtype with a uint64
- array is not safe (and will therefore not be performed)
-
- 1) A NaN element is encountered.
-
- uint64 cannot be safely cast to float64 due to truncation issues
- at the extreme ends of the range.
-
- 2) A negative number is encountered.
-
- There is no numerical dtype that can hold both negative numbers
- and numbers greater than INT64_MAX. Hence, at least one number
- will be improperly cast if we convert to a numeric dtype.
-
- Returns
- -------
- return_values : bool
- Whether or not we should return the original input array to avoid
- data truncation.
-
- Raises
- ------
- ValueError : uint64 elements were detected, and at least one of the
- two conflict cases was also detected. However, we are
- trying to force conversion to a numeric dtype.
- """
- return (self.uint_ and (self.null_ or self.sint_)
- and not self.coerce_numeric)
-
- cdef inline saw_null(self):
- """
- Set flags indicating that a null value was encountered.
- """
- self.null_ = 1
- self.float_ = 1
-
- cdef saw_int(self, object val):
- """
- Set flags indicating that an integer value was encountered.
-
- In addition to setting a flag that an integer was seen, we
- also set two flags depending on the type of integer seen:
-
- 1) sint_ : a negative (signed) number in the
- range of [-2**63, 0) was encountered
- 2) uint_ : a positive number in the range of
- [2**63, 2**64) was encountered
-
- Parameters
- ----------
- val : Python int
- Value with which to set the flags.
- """
- self.int_ = 1
- self.sint_ = self.sint_ or (oINT64_MIN <= val < 0)
- self.uint_ = self.uint_ or (oINT64_MAX < val <= oUINT64_MAX)
-
- @property
- def numeric_(self):
- return self.complex_ or self.float_ or self.int_
-
- @property
- def is_bool(self):
- return not (self.datetime_ or self.numeric_ or self.timedelta_)
-
- @property
- def is_float_or_complex(self):
- return not (self.bool_ or self.datetime_ or self.timedelta_)
-
-
-cdef _try_infer_map(v):
- """ if its in our map, just return the dtype """
- cdef:
- object attr, val
- for attr in ['name', 'kind', 'base']:
- val = getattr(v.dtype, attr)
- if val in _TYPE_MAP:
- return _TYPE_MAP[val]
- return None
-
-
-def infer_dtype(object value, bint skipna=False):
- """
- Efficiently infer the type of a passed val, or list-like
- array of values. Return a string describing the type.
-
- Parameters
- ----------
- value : scalar, list, ndarray, or pandas type
- skipna : bool, default False
- Ignore NaN values when inferring the type. The default of ``False``
- will be deprecated in a later version of pandas.
-
- .. versionadded:: 0.21.0
-
- Returns
- -------
- string describing the common type of the input data.
- Results can include:
-
- - string
- - unicode
- - bytes
- - floating
- - integer
- - mixed-integer
- - mixed-integer-float
- - decimal
- - complex
- - categorical
- - boolean
- - datetime64
- - datetime
- - date
- - timedelta64
- - timedelta
- - time
- - period
- - mixed
-
- Raises
- ------
- TypeError if ndarray-like but cannot infer the dtype
-
- Notes
- -----
- - 'mixed' is the catchall for anything that is not otherwise
- specialized
- - 'mixed-integer-float' are floats and integers
- - 'mixed-integer' are integers mixed with non-integers
-
- Examples
- --------
- >>> infer_dtype(['foo', 'bar'])
- 'string'
-
- >>> infer_dtype(['a', np.nan, 'b'], skipna=True)
- 'string'
-
- >>> infer_dtype(['a', np.nan, 'b'], skipna=False)
- 'mixed'
-
- >>> infer_dtype([b'foo', b'bar'])
- 'bytes'
-
- >>> infer_dtype([1, 2, 3])
- 'integer'
-
- >>> infer_dtype([1, 2, 3.5])
- 'mixed-integer-float'
-
- >>> infer_dtype([1.0, 2.0, 3.5])
- 'floating'
-
- >>> infer_dtype(['a', 1])
- 'mixed-integer'
-
- >>> infer_dtype([Decimal(1), Decimal(2.0)])
- 'decimal'
-
- >>> infer_dtype([True, False])
- 'boolean'
-
- >>> infer_dtype([True, False, np.nan])
- 'mixed'
-
- >>> infer_dtype([pd.Timestamp('20130101')])
- 'datetime'
-
- >>> infer_dtype([datetime.date(2013, 1, 1)])
- 'date'
-
- >>> infer_dtype([np.datetime64('2013-01-01')])
- 'datetime64'
-
- >>> infer_dtype([datetime.timedelta(0, 1, 1)])
- 'timedelta'
-
- >>> infer_dtype(pd.Series(list('aabc')).astype('category'))
- 'categorical'
- """
- cdef:
- Py_ssize_t i, n
- object val
- ndarray values
- bint seen_pdnat = False
- bint seen_val = False
-
- if util.is_array(value):
- values = value
- elif hasattr(value, 'dtype'):
-
- # this will handle ndarray-like
- # e.g. categoricals
- try:
- values = getattr(value, '_values', getattr(
- value, 'values', value))
- except:
- value = _try_infer_map(value)
- if value is not None:
- return value
-
- # its ndarray like but we can't handle
- raise ValueError("cannot infer type for {0}".format(type(value)))
-
- else:
- if not PyList_Check(value):
- value = list(value)
- from pandas.core.dtypes.cast import (
- construct_1d_object_array_from_listlike)
- values = construct_1d_object_array_from_listlike(value)
-
- values = getattr(values, 'values', values)
- val = _try_infer_map(values)
- if val is not None:
- return val
-
- if values.dtype != np.object_:
- values = values.astype('O')
-
- # make contiguous
- values = values.ravel()
-
- n = len(values)
- if n == 0:
- return 'empty'
-
- # try to use a valid value
- for i in range(n):
- val = util.get_value_1d(values, i)
-
- # do not use is_nul_datetimelike to keep
- # np.datetime64('nat') and np.timedelta64('nat')
- if val is None or util.is_nan(val):
- pass
- elif val is NaT:
- seen_pdnat = True
- else:
- seen_val = True
- break
-
- # if all values are nan/NaT
- if seen_val is False and seen_pdnat is True:
- return 'datetime'
- # float/object nan is handled in latter logic
-
- if util.is_datetime64_object(val):
- if is_datetime64_array(values):
- return 'datetime64'
- elif is_timedelta_or_timedelta64_array(values):
- return 'timedelta'
-
- elif is_timedelta(val):
- if is_timedelta_or_timedelta64_array(values):
- return 'timedelta'
-
- elif util.is_integer_object(val):
- # a timedelta will show true here as well
- if is_timedelta(val):
- if is_timedelta_or_timedelta64_array(values):
- return 'timedelta'
-
- if is_integer_array(values):
- return 'integer'
- elif is_integer_float_array(values):
- return 'mixed-integer-float'
- elif is_timedelta_or_timedelta64_array(values):
- return 'timedelta'
- return 'mixed-integer'
-
- elif is_datetime(val):
- if is_datetime_array(values):
- return 'datetime'
-
- elif is_date(val):
- if is_date_array(values, skipna=skipna):
- return 'date'
-
- elif is_time(val):
- if is_time_array(values, skipna=skipna):
- return 'time'
-
- elif is_decimal(val):
- return 'decimal'
-
- elif util.is_float_object(val):
- if is_float_array(values):
- return 'floating'
- elif is_integer_float_array(values):
- return 'mixed-integer-float'
-
- elif util.is_bool_object(val):
- if is_bool_array(values, skipna=skipna):
- return 'boolean'
-
- elif PyString_Check(val):
- if is_string_array(values, skipna=skipna):
- return 'string'
-
- elif PyUnicode_Check(val):
- if is_unicode_array(values, skipna=skipna):
- return 'unicode'
-
- elif PyBytes_Check(val):
- if is_bytes_array(values, skipna=skipna):
- return 'bytes'
-
- elif is_period(val):
- if is_period_array(values):
- return 'period'
-
- elif is_interval(val):
- if is_interval_array(values):
- return 'interval'
-
- for i in range(n):
- val = util.get_value_1d(values, i)
- if (util.is_integer_object(val) and
- not util.is_timedelta64_object(val) and
- not util.is_datetime64_object(val)):
- return 'mixed-integer'
-
- return 'mixed'
-
-
-cpdef object infer_datetimelike_array(object arr):
- """
- infer if we have a datetime or timedelta array
- - date: we have *only* date and maybe strings, nulls
- - datetime: we have *only* datetimes and maybe strings, nulls
- - timedelta: we have *only* timedeltas and maybe strings, nulls
- - nat: we do not have *any* date, datetimes or timedeltas, but do have
- at least a NaT
- - mixed: other objects (strings, a mix of tz-aware and tz-naive, or
- actual objects)
-
- Parameters
- ----------
- arr : object array
-
- Returns
- -------
- string: {datetime, timedelta, date, nat, mixed}
-
- """
-
- cdef:
- Py_ssize_t i, n = len(arr)
- bint seen_timedelta = 0, seen_date = 0, seen_datetime = 0
- bint seen_tz_aware = 0, seen_tz_naive = 0
- bint seen_nat = 0
- list objs = []
- object v
-
- for i in range(n):
- v = arr[i]
- if util.is_string_object(v):
- objs.append(v)
-
- if len(objs) == 3:
- break
-
- elif v is None or util.is_nan(v):
- # nan or None
- pass
- elif v is NaT:
- seen_nat = 1
- elif is_datetime(v):
- # datetime
- seen_datetime = 1
-
- # disambiguate between tz-naive and tz-aware
- if v.tzinfo is None:
- seen_tz_naive = 1
- else:
- seen_tz_aware = 1
-
- if seen_tz_naive and seen_tz_aware:
- return 'mixed'
- elif util.is_datetime64_object(v):
- # np.datetime64
- seen_datetime = 1
- elif is_date(v):
- seen_date = 1
- elif is_timedelta(v) or util.is_timedelta64_object(v):
- # timedelta, or timedelta64
- seen_timedelta = 1
- else:
- return 'mixed'
-
- if seen_date and not (seen_datetime or seen_timedelta):
- return 'date'
- elif seen_datetime and not seen_timedelta:
- return 'datetime'
- elif seen_timedelta and not seen_datetime:
- return 'timedelta'
- elif seen_nat:
- return 'nat'
-
- # short-circuit by trying to
- # actually convert these strings
- # this is for performance as we don't need to try
- # convert *every* string array
- if len(objs):
- try:
- array_to_datetime(objs, errors='raise')
- return 'datetime'
- except:
- pass
-
- # we are *not* going to infer from strings
- # for timedelta as too much ambiguity
-
- return 'mixed'
-
-
-cdef inline bint is_datetime(object o):
- return PyDateTime_Check(o)
-
-cdef inline bint is_date(object o):
- return PyDate_Check(o)
-
-cdef inline bint is_time(object o):
- return PyTime_Check(o)
-
-cdef inline bint is_timedelta(object o):
- return PyDelta_Check(o) or util.is_timedelta64_object(o)
-
-
-cdef class Validator:
-
- cdef:
- Py_ssize_t n
- dtype dtype
- bint skipna
-
- def __cinit__(
- self,
- Py_ssize_t n,
- dtype dtype=np.dtype(np.object_),
- bint skipna=False
- ):
- self.n = n
- self.dtype = dtype
- self.skipna = skipna
-
- cdef bint validate(self, ndarray values) except -1:
- if not self.n:
- return False
-
- if self.is_array_typed():
- return True
- elif self.dtype.type_num == NPY_OBJECT:
- if self.skipna:
- return self._validate_skipna(values)
- else:
- return self._validate(values)
- else:
- return False
-
- @cython.wraparound(False)
- @cython.boundscheck(False)
- cdef bint _validate(self, ndarray values) except -1:
- cdef:
- Py_ssize_t i
- Py_ssize_t n = self.n
-
- for i in range(n):
- if not self.is_valid(values[i]):
- return False
-
- return self.finalize_validate()
-
- @cython.wraparound(False)
- @cython.boundscheck(False)
- cdef bint _validate_skipna(self, ndarray values) except -1:
- cdef:
- Py_ssize_t i
- Py_ssize_t n = self.n
-
- for i in range(n):
- if not self.is_valid_skipna(values[i]):
- return False
-
- return self.finalize_validate_skipna()
-
- cdef bint is_valid(self, object value) except -1:
- return self.is_value_typed(value)
-
- cdef bint is_valid_skipna(self, object value) except -1:
- return self.is_valid(value) or self.is_valid_null(value)
-
- cdef bint is_value_typed(self, object value) except -1:
- raise NotImplementedError(
- '{} child class must define is_value_typed'.format(
- type(self).__name__
- )
- )
-
- cdef bint is_valid_null(self, object value) except -1:
- return value is None or util.is_nan(value)
-
- cdef bint is_array_typed(self) except -1:
- return False
-
- cdef inline bint finalize_validate(self):
- return True
-
- cdef bint finalize_validate_skipna(self):
- # TODO(phillipc): Remove the existing validate methods and replace them
- # with the skipna versions upon full deprecation of skipna=False
- return True
-
-
-cdef class BoolValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return util.is_bool_object(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.bool_)
-
-
-cpdef bint is_bool_array(ndarray values, bint skipna=False):
- cdef:
- BoolValidator validator = BoolValidator(
- len(values),
- values.dtype,
- skipna=skipna
- )
- return validator.validate(values)
-
-
-cdef class IntegerValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return util.is_integer_object(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.integer)
-
-
-cpdef bint is_integer_array(ndarray values):
- cdef:
- IntegerValidator validator = IntegerValidator(
- len(values),
- values.dtype,
- )
- return validator.validate(values)
-
-
-cdef class IntegerFloatValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return util.is_integer_object(value) or util.is_float_object(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.integer)
-
-
-cdef bint is_integer_float_array(ndarray values):
- cdef:
- IntegerFloatValidator validator = IntegerFloatValidator(
- len(values),
- values.dtype,
- )
- return validator.validate(values)
-
-
-cdef class FloatValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return util.is_float_object(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.floating)
-
-
-cpdef bint is_float_array(ndarray values):
- cdef FloatValidator validator = FloatValidator(len(values), values.dtype)
- return validator.validate(values)
-
-
-cdef class StringValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return PyString_Check(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.str_)
-
-
-cpdef bint is_string_array(ndarray values, bint skipna=False):
- cdef:
- StringValidator validator = StringValidator(
- len(values),
- values.dtype,
- skipna=skipna,
- )
- return validator.validate(values)
-
-
-cdef class UnicodeValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return PyUnicode_Check(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.unicode_)
-
-
-cdef bint is_unicode_array(ndarray values, bint skipna=False):
- cdef:
- UnicodeValidator validator = UnicodeValidator(
- len(values),
- values.dtype,
- skipna=skipna,
- )
- return validator.validate(values)
-
-
-cdef class BytesValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return PyBytes_Check(value)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.bytes_)
-
-
-cdef bint is_bytes_array(ndarray values, bint skipna=False):
- cdef:
- BytesValidator validator = BytesValidator(
- len(values),
- values.dtype,
- skipna=skipna
- )
- return validator.validate(values)
-
-
-cdef class TemporalValidator(Validator):
-
- cdef Py_ssize_t generic_null_count
-
- def __cinit__(
- self,
- Py_ssize_t n,
- dtype dtype=np.dtype(np.object_),
- bint skipna=False
- ):
- self.n = n
- self.dtype = dtype
- self.skipna = skipna
- self.generic_null_count = 0
-
- cdef inline bint is_valid(self, object value) except -1:
- return self.is_value_typed(value) or self.is_valid_null(value)
-
- cdef bint is_valid_null(self, object value) except -1:
- raise NotImplementedError(
- '{} child class must define is_valid_null'.format(
- type(self).__name__
- )
- )
-
- cdef inline bint is_valid_skipna(self, object value) except -1:
- cdef:
- bint is_typed_null = self.is_valid_null(value)
- bint is_generic_null = value is None or util.is_nan(value)
- self.generic_null_count += is_typed_null and is_generic_null
- return self.is_value_typed(value) or is_typed_null or is_generic_null
-
- cdef inline bint finalize_validate_skipna(self):
- return self.generic_null_count != self.n
-
-
-cdef class DatetimeValidator(TemporalValidator):
-
- cdef bint is_value_typed(self, object value) except -1:
- return is_datetime(value)
-
- cdef inline bint is_valid_null(self, object value) except -1:
- return is_null_datetime64(value)
-
-
-cpdef bint is_datetime_array(ndarray values):
- cdef:
- DatetimeValidator validator = DatetimeValidator(
- len(values),
- skipna=True,
- )
- return validator.validate(values)
-
-
-cdef class Datetime64Validator(DatetimeValidator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return util.is_datetime64_object(value)
-
-
-cpdef bint is_datetime64_array(ndarray values):
- cdef:
- Datetime64Validator validator = Datetime64Validator(
- len(values),
- skipna=True,
- )
- return validator.validate(values)
-
-
-cpdef bint is_datetime_with_singletz_array(ndarray values):
- """
- Check values have the same tzinfo attribute.
- Doesn't check values are datetime-like types.
- """
-
- cdef Py_ssize_t i, j, n = len(values)
- cdef object base_val, base_tz, val, tz
-
- if n == 0:
- return False
-
- for i in range(n):
- base_val = values[i]
- if base_val is not NaT:
- base_tz = get_timezone(getattr(base_val, 'tzinfo', None))
-
- for j in range(i, n):
- val = values[j]
- if val is not NaT:
- tz = getattr(val, 'tzinfo', None)
- if not tz_compare(base_tz, tz):
- return False
- break
-
- return True
-
-
-cdef class TimedeltaValidator(TemporalValidator):
-
- cdef bint is_value_typed(self, object value) except -1:
- return PyDelta_Check(value)
-
- cdef inline bint is_valid_null(self, object value) except -1:
- return is_null_timedelta64(value)
-
-
-cpdef bint is_timedelta_array(ndarray values):
- cdef:
- TimedeltaValidator validator = TimedeltaValidator(
- len(values),
- skipna=True,
- )
- return validator.validate(values)
-
-
-cdef class Timedelta64Validator(TimedeltaValidator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return util.is_timedelta64_object(value)
-
-
-cpdef bint is_timedelta64_array(ndarray values):
- cdef:
- Timedelta64Validator validator = Timedelta64Validator(
- len(values),
- skipna=True,
- )
- return validator.validate(values)
-
-
-cdef class AnyTimedeltaValidator(TimedeltaValidator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return is_timedelta(value)
-
-
-cpdef bint is_timedelta_or_timedelta64_array(ndarray values):
- """ infer with timedeltas and/or nat/none """
- cdef:
- AnyTimedeltaValidator validator = AnyTimedeltaValidator(
- len(values),
- skipna=True,
- )
- return validator.validate(values)
-
-
-cdef class DateValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return is_date(value)
-
-
-cpdef bint is_date_array(ndarray values, bint skipna=False):
- cdef DateValidator validator = DateValidator(len(values), skipna=skipna)
- return validator.validate(values)
-
-
-cdef class TimeValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return is_time(value)
-
-
-cpdef bint is_time_array(ndarray values, bint skipna=False):
- cdef TimeValidator validator = TimeValidator(len(values), skipna=skipna)
- return validator.validate(values)
-
-
-cdef class PeriodValidator(TemporalValidator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return is_period(value)
-
- cdef inline bint is_valid_null(self, object value) except -1:
- return is_null_period(value)
-
-
-cpdef bint is_period_array(ndarray values):
- cdef PeriodValidator validator = PeriodValidator(len(values), skipna=True)
- return validator.validate(values)
-
-
-cdef class IntervalValidator(Validator):
-
- cdef inline bint is_value_typed(self, object value) except -1:
- return is_interval(value)
-
-
-cpdef bint is_interval_array(ndarray values):
- cdef:
- IntervalValidator validator = IntervalValidator(
- len(values),
- skipna=True,
- )
- return validator.validate(values)
-
-
-cdef extern from "parse_helper.h":
- int floatify(object, double *result, int *maybe_int) except -1
-
-# constants that will be compared to potentially arbitrarily large
-# python int
-cdef object oINT64_MAX = <int64_t> INT64_MAX
-cdef object oINT64_MIN = <int64_t> INT64_MIN
-cdef object oUINT64_MAX = <uint64_t> UINT64_MAX
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def maybe_convert_numeric(ndarray[object] values, set na_values,
- bint convert_empty=True, bint coerce_numeric=False):
- """
- Convert object array to a numeric array if possible.
-
- Parameters
- ----------
- values : ndarray
- Array of object elements to convert.
- na_values : set
- Set of values that should be interpreted as NaN.
- convert_empty : bool, default True
- If an empty array-like object is encountered, whether to interpret
- that element as NaN or not. If set to False, a ValueError will be
- raised if such an element is encountered and 'coerce_numeric' is False.
- coerce_numeric : bool, default False
- If initial attempts to convert to numeric have failed, whether to
- force conversion to numeric via alternative methods or by setting the
- element to NaN. Otherwise, an Exception will be raised when such an
- element is encountered.
-
- This boolean also has an impact on how conversion behaves when a
- numeric array has no suitable numerical dtype to return (i.e. uint64,
- int32, uint8). If set to False, the original object array will be
- returned. Otherwise, a ValueError will be raised.
-
- Returns
- -------
- numeric_array : array of converted object values to numerical ones
- """
-
- if len(values) == 0:
- return np.array([], dtype='i8')
-
- # fastpath for ints - try to convert all based on first value
- cdef object val = values[0]
-
- if util.is_integer_object(val):
- try:
- maybe_ints = values.astype('i8')
- if (maybe_ints == values).all():
- return maybe_ints
- except (ValueError, OverflowError, TypeError):
- pass
-
- # otherwise, iterate and do full infererence
- cdef:
- int status, maybe_int
- Py_ssize_t i, n = values.size
- Seen seen = Seen(coerce_numeric)
- ndarray[float64_t] floats = np.empty(n, dtype='f8')
- ndarray[complex128_t] complexes = np.empty(n, dtype='c16')
- ndarray[int64_t] ints = np.empty(n, dtype='i8')
- ndarray[uint64_t] uints = np.empty(n, dtype='u8')
- ndarray[uint8_t] bools = np.empty(n, dtype='u1')
- float64_t fval
-
- for i in range(n):
- val = values[i]
-
- if val.__hash__ is not None and val in na_values:
- seen.saw_null()
- floats[i] = complexes[i] = nan
- elif util.is_float_object(val):
- fval = val
- if fval != fval:
- seen.null_ = True
-
- floats[i] = complexes[i] = fval
- seen.float_ = True
- elif util.is_integer_object(val):
- floats[i] = complexes[i] = val
-
- val = int(val)
- seen.saw_int(val)
-
- if val >= 0:
- if val <= oUINT64_MAX:
- uints[i] = val
- else:
- seen.float_ = True
-
- if val <= oINT64_MAX:
- ints[i] = val
-
- if seen.sint_ and seen.uint_:
- seen.float_ = True
-
- elif util.is_bool_object(val):
- floats[i] = uints[i] = ints[i] = bools[i] = val
- seen.bool_ = True
- elif val is None:
- seen.saw_null()
- floats[i] = complexes[i] = nan
- elif hasattr(val, '__len__') and len(val) == 0:
- if convert_empty or seen.coerce_numeric:
- seen.saw_null()
- floats[i] = complexes[i] = nan
- else:
- raise ValueError('Empty string encountered')
- elif util.is_complex_object(val):
- complexes[i] = val
- seen.complex_ = True
- elif is_decimal(val):
- floats[i] = complexes[i] = val
- seen.float_ = True
- else:
- try:
- status = floatify(val, &fval, &maybe_int)
-
- if fval in na_values:
- seen.saw_null()
- floats[i] = complexes[i] = nan
- else:
- if fval != fval:
- seen.null_ = True
-
- floats[i] = fval
-
- if maybe_int:
- as_int = int(val)
-
- if as_int in na_values:
- seen.saw_null()
- else:
- seen.saw_int(as_int)
-
- if not (seen.float_ or as_int in na_values):
- if as_int < oINT64_MIN or as_int > oUINT64_MAX:
- raise ValueError('Integer out of range.')
-
- if as_int >= 0:
- uints[i] = as_int
- if as_int <= oINT64_MAX:
- ints[i] = as_int
-
- seen.float_ = seen.float_ or (seen.uint_ and seen.sint_)
- else:
- seen.float_ = True
- except (TypeError, ValueError) as e:
- if not seen.coerce_numeric:
- raise type(e)(str(e) + ' at position {}'.format(i))
- elif "uint64" in str(e): # Exception from check functions.
- raise
- seen.saw_null()
- floats[i] = nan
-
- if seen.check_uint64_conflict():
- return values
-
- if seen.complex_:
- return complexes
- elif seen.float_:
- return floats
- elif seen.int_:
- if seen.uint_:
- return uints
- else:
- return ints
- elif seen.bool_:
- return bools.view(np.bool_)
- elif seen.uint_:
- return uints
- return ints
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
- bint safe=0, bint convert_datetime=0,
- bint convert_timedelta=0):
- """
- Type inference function-- convert object array to proper dtype
- """
- cdef:
- Py_ssize_t i, n
- ndarray[float64_t] floats
- ndarray[complex128_t] complexes
- ndarray[int64_t] ints
- ndarray[uint64_t] uints
- ndarray[uint8_t] bools
- ndarray[int64_t] idatetimes
- ndarray[int64_t] itimedeltas
- Seen seen = Seen()
- object val
- float64_t fval, fnan
-
- n = len(objects)
-
- floats = np.empty(n, dtype='f8')
- complexes = np.empty(n, dtype='c16')
- ints = np.empty(n, dtype='i8')
- uints = np.empty(n, dtype='u8')
- bools = np.empty(n, dtype=np.uint8)
-
- if convert_datetime:
- datetimes = np.empty(n, dtype='M8[ns]')
- idatetimes = datetimes.view(np.int64)
-
- if convert_timedelta:
- timedeltas = np.empty(n, dtype='m8[ns]')
- itimedeltas = timedeltas.view(np.int64)
-
- fnan = np.nan
-
- for i from 0 <= i < n:
- val = objects[i]
-
- if val is None:
- seen.null_ = 1
- floats[i] = complexes[i] = fnan
- elif val is NaT:
- if convert_datetime:
- idatetimes[i] = iNaT
- seen.datetime_ = 1
- if convert_timedelta:
- itimedeltas[i] = iNaT
- seen.timedelta_ = 1
- if not (convert_datetime or convert_timedelta):
- seen.object_ = 1
- elif util.is_bool_object(val):
- seen.bool_ = 1
- bools[i] = val
- elif util.is_float_object(val):
- floats[i] = complexes[i] = val
- seen.float_ = 1
- elif util.is_datetime64_object(val):
- if convert_datetime:
- idatetimes[i] = convert_to_tsobject(
- val, None, None, 0, 0).value
- seen.datetime_ = 1
- else:
- seen.object_ = 1
- break
- elif is_timedelta(val):
- if convert_timedelta:
- itimedeltas[i] = convert_to_timedelta64(val, 'ns')
- seen.timedelta_ = 1
- else:
- seen.object_ = 1
- break
- elif util.is_integer_object(val):
- seen.int_ = 1
- floats[i] = <float64_t> val
- complexes[i] = <double complex> val
- if not seen.null_:
- seen.saw_int(int(val))
-
- if ((seen.uint_ and seen.sint_) or
- val > oUINT64_MAX or val < oINT64_MIN):
- seen.object_ = 1
- break
-
- if seen.uint_:
- uints[i] = val
- elif seen.sint_:
- ints[i] = val
- else:
- uints[i] = val
- ints[i] = val
-
- elif util.is_complex_object(val):
- complexes[i] = val
- seen.complex_ = 1
- elif PyDateTime_Check(val) or util.is_datetime64_object(val):
-
- # if we have an tz's attached then return the objects
- if convert_datetime:
- if getattr(val, 'tzinfo', None) is not None:
- seen.datetimetz_ = 1
- break
- else:
- seen.datetime_ = 1
- idatetimes[i] = convert_to_tsobject(
- val, None, None, 0, 0).value
- else:
- seen.object_ = 1
- break
- elif try_float and not util.is_string_object(val):
- # this will convert Decimal objects
- try:
- floats[i] = float(val)
- complexes[i] = complex(val)
- seen.float_ = 1
- except Exception:
- seen.object_ = 1
- break
- else:
- seen.object_ = 1
- break
-
- # we try to coerce datetime w/tz but must all have the same tz
- if seen.datetimetz_:
- if len({getattr(val, 'tzinfo', None) for val in objects}) == 1:
- from pandas import DatetimeIndex
- return DatetimeIndex(objects)
- seen.object_ = 1
-
- if not seen.object_:
- if not safe:
- if seen.null_:
- if seen.is_float_or_complex:
- if seen.complex_:
- return complexes
- elif seen.float_ or seen.int_:
- return floats
- else:
- if not seen.bool_:
- if seen.datetime_:
- if not seen.numeric_:
- return datetimes
- elif seen.timedelta_:
- if not seen.numeric_:
- return timedeltas
- else:
- if seen.complex_:
- return complexes
- elif seen.float_:
- return floats
- elif seen.int_:
- if seen.uint_:
- return uints
- else:
- return ints
- elif seen.is_bool:
- return bools.view(np.bool_)
-
- else:
- # don't cast int to float, etc.
- if seen.null_:
- if seen.is_float_or_complex:
- if seen.complex_:
- if not seen.int_:
- return complexes
- elif seen.float_:
- if not seen.int_:
- return floats
- else:
- if not seen.bool_:
- if seen.datetime_:
- if not seen.numeric_:
- return datetimes
- elif seen.timedelta_:
- if not seen.numeric_:
- return timedeltas
- else:
- if seen.complex_:
- if not seen.int_:
- return complexes
- elif seen.float_:
- if not seen.int_:
- return floats
- elif seen.int_:
- if seen.uint_:
- return uints
- else:
- return ints
- elif seen.is_bool:
- return bools.view(np.bool_)
-
- return objects
-
-
-def map_infer_mask(ndarray arr, object f, ndarray[uint8_t] mask,
- bint convert=1):
- """
- Substitute for np.vectorize with pandas-friendly dtype inference
-
- Parameters
- ----------
- arr : ndarray
- f : function
-
- Returns
- -------
- mapped : ndarray
- """
- cdef:
- Py_ssize_t i, n
- ndarray[object] result
- object val
-
- n = len(arr)
- result = np.empty(n, dtype=object)
- for i in range(n):
- if mask[i]:
- val = util.get_value_at(arr, i)
- else:
- val = f(util.get_value_at(arr, i))
-
- # unbox 0-dim arrays, GH #690
- if is_array(val) and PyArray_NDIM(val) == 0:
- # is there a faster way to unbox?
- val = val.item()
-
- result[i] = val
-
- if convert:
- return maybe_convert_objects(result,
- try_float=0,
- convert_datetime=0,
- convert_timedelta=0)
-
- return result
-
-
-def map_infer(ndarray arr, object f, bint convert=1):
- """
- Substitute for np.vectorize with pandas-friendly dtype inference
-
- Parameters
- ----------
- arr : ndarray
- f : function
-
- Returns
- -------
- mapped : ndarray
- """
- cdef:
- Py_ssize_t i, n
- ndarray[object] result
- object val
-
- n = len(arr)
- result = np.empty(n, dtype=object)
- for i in range(n):
- val = f(util.get_value_at(arr, i))
-
- # unbox 0-dim arrays, GH #690
- if is_array(val) and PyArray_NDIM(val) == 0:
- # is there a faster way to unbox?
- val = val.item()
-
- result[i] = val
-
- if convert:
- return maybe_convert_objects(result,
- try_float=0,
- convert_datetime=0,
- convert_timedelta=0)
-
- return result
-
-
-def to_object_array(list rows, int min_width=0):
- """
- Convert a list of lists into an object array.
-
- Parameters
- ----------
- rows : 2-d array (N, K)
- A list of lists to be converted into an array
- min_width : int
- The minimum width of the object array. If a list
- in `rows` contains fewer than `width` elements,
- the remaining elements in the corresponding row
- will all be `NaN`.
-
- Returns
- -------
- obj_array : numpy array of the object dtype
- """
- cdef:
- Py_ssize_t i, j, n, k, tmp
- ndarray[object, ndim=2] result
- list row
-
- n = len(rows)
-
- k = min_width
- for i from 0 <= i < n:
- tmp = len(rows[i])
- if tmp > k:
- k = tmp
-
- result = np.empty((n, k), dtype=object)
-
- for i from 0 <= i < n:
- row = rows[i]
-
- for j from 0 <= j < len(row):
- result[i, j] = row[j]
-
- return result
-
-
-def tuples_to_object_array(ndarray[object] tuples):
- cdef:
- Py_ssize_t i, j, n, k, tmp
- ndarray[object, ndim=2] result
- tuple tup
-
- n = len(tuples)
- k = len(tuples[0])
- result = np.empty((n, k), dtype=object)
- for i in range(n):
- tup = tuples[i]
- for j in range(k):
- result[i, j] = tup[j]
-
- return result
-
-
-def to_object_array_tuples(list rows):
- cdef:
- Py_ssize_t i, j, n, k, tmp
- ndarray[object, ndim=2] result
- tuple row
-
- n = len(rows)
-
- k = 0
- for i from 0 <= i < n:
- tmp = len(rows[i])
- if tmp > k:
- k = tmp
-
- result = np.empty((n, k), dtype=object)
-
- try:
- for i in range(n):
- row = rows[i]
- for j from 0 <= j < len(row):
- result[i, j] = row[j]
- except Exception:
- # upcast any subclasses to tuple
- for i in range(n):
- row = tuple(rows[i])
- for j from 0 <= j < len(row):
- result[i, j] = row[j]
-
- return result
-
-
-def fast_multiget(dict mapping, ndarray keys, default=np.nan):
- cdef:
- Py_ssize_t i, n = len(keys)
- object val
- ndarray[object] output = np.empty(n, dtype='O')
-
- if n == 0:
- # kludge, for Series
- return np.empty(0, dtype='f8')
-
- keys = getattr(keys, 'values', keys)
-
- for i in range(n):
- val = util.get_value_1d(keys, i)
- if val in mapping:
- output[i] = mapping[val]
- else:
- output[i] = default
-
- return maybe_convert_objects(output)
diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h
index 6dd8b66eab33d..d17d9166ea3ee 100644
--- a/pandas/_libs/src/parse_helper.h
+++ b/pandas/_libs/src/parse_helper.h
@@ -12,6 +12,7 @@ The full license is in the LICENSE file, distributed with this software.
#include <errno.h>
#include <float.h>
+#include "helper.h"
#include "headers/portable.h"
static double xstrtod(const char *p, char **q, char decimal, char sci,
diff --git a/setup.py b/setup.py
index 4fdfc0ab7de0d..964167737c9c6 100755
--- a/setup.py
+++ b/setup.py
@@ -483,26 +483,17 @@ def maybe_cythonize(extensions, *args, **kwargs):
return extensions
-lib_depends = ['inference']
-
-
def srcpath(name=None, suffix='.pyx', subdir='src'):
return pjoin('pandas', subdir, name + suffix)
-if suffix == '.pyx':
- lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src')
- for f in lib_depends]
-else:
- lib_depends = []
-
common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src']
ts_include = ['pandas/_libs/tslibs/src']
-lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h',
- 'pandas/_libs/src/parse_helper.h',
- 'pandas/_libs/src/compat_helper.h']
+lib_depends = ['pandas/_libs/src/numpy_helper.h',
+ 'pandas/_libs/src/parse_helper.h',
+ 'pandas/_libs/src/compat_helper.h']
np_datetime_headers = [
'pandas/_libs/tslibs/src/datetime/np_datetime.h',
| Continue trimming down _libs/src.
src/inference is a minor hassle, particularly because ATM there are imports/cimports in both it and lib. Its existence also complicates setup.py a bit.
Evidently parse_helper.h depended on the order in which it was included (I assume this is a code smell in C, but who knows). Fixed this.
This is almost cut/paste. The changes are:
- Arrange (and de-duplicate) imports at the top of lib
- whitespace fixups
- change `for i from 0 <= i < n:` to `for i in range(n):` in a few places
- a couple of comments about potential optimizations in the next pass
| https://api.github.com/repos/pandas-dev/pandas/pulls/22219 | 2018-08-06T17:15:58Z | 2018-08-08T10:38:49Z | 2018-08-08T10:38:49Z | 2018-08-08T15:50:46Z |
Add missing period to pandas.Series.interpolate docstring | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f62605c342702..08d9191d72a8a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6098,9 +6098,9 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
* 'index', 'values': use the actual numerical values of the index
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial' is passed to
- ``scipy.interpolate.interp1d``. Both 'polynomial' and 'spline'
- require that you also specify an `order` (int),
- e.g. df.interpolate(method='polynomial', order=4).
+ :class:`scipy.interpolate.interp1d`. Both 'polynomial' and
+ 'spline' require that you also specify an `order` (int),
+ e.g. ``df.interpolate(method='polynomial', order=4)``.
These use the actual numerical values of the index.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
are all wrappers around the scipy interpolation methods of
@@ -6110,13 +6110,14 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `tutorial documentation
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__
- * 'from_derivatives' refers to BPoly.from_derivatives which
+ * 'from_derivatives' refers to
+ :meth:`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18
.. versionadded:: 0.18.1
- Added support for the 'akima' method
+ Added support for the 'akima' method.
Added interpolate method 'from_derivatives' which replaces
'piecewise_polynomial' in scipy 0.18; backwards-compatible with
scipy < 0.18
| Previously, the _new in version_ message seemed like a run-on sentence:
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.interpolate.html
Also added code formatting for `BPoly.from_derivatives` reference. | https://api.github.com/repos/pandas-dev/pandas/pulls/22217 | 2018-08-06T16:01:14Z | 2018-08-08T10:39:48Z | 2018-08-08T10:39:48Z | 2018-08-08T12:56:05Z |
DOC: Added 0.23.4 whatsnew [ci skip] | diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
index 436bbeae5d08f..8672685b3ebb4 100644
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -20,6 +20,8 @@ These are new features and improvements of note in each release.
.. include:: whatsnew/v0.24.0.txt
+.. include:: whatsnew/v0.23.4.txt
+
.. include:: whatsnew/v0.23.3.txt
.. include:: whatsnew/v0.23.2.txt
| [ci skip] | https://api.github.com/repos/pandas-dev/pandas/pulls/22214 | 2018-08-06T11:17:33Z | 2018-08-06T11:17:49Z | 2018-08-06T11:17:49Z | 2018-08-06T11:17:53Z |
Fix Python2 test failures in certain locales | diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py
index dabafb1f49ba8..c049dfc874940 100644
--- a/pandas/tests/util/test_util.py
+++ b/pandas/tests/util/test_util.py
@@ -433,6 +433,26 @@ def teardown_class(cls):
del cls.locales
del cls.current_locale
+ def test_can_set_locale_valid_set(self):
+ # Setting the default locale should return True
+ assert tm.can_set_locale('') is True
+
+ def test_can_set_locale_invalid_set(self):
+ # Setting an invalid locale should return False
+ assert tm.can_set_locale('non-existent_locale') is False
+
+ def test_can_set_locale_invalid_get(self, monkeypatch):
+ # In some cases, an invalid locale can be set,
+ # but a subsequent getlocale() raises a ValueError
+ # See GH 22129
+
+ def mockgetlocale():
+ raise ValueError()
+
+ with monkeypatch.context() as m:
+ m.setattr(locale, 'getlocale', mockgetlocale)
+ assert tm.can_set_locale('') is False
+
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index afc928ddfbb84..39ab498d080bf 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -504,23 +504,19 @@ def set_locale(new_locale, lc_var=locale.LC_ALL):
try:
locale.setlocale(lc_var, new_locale)
-
- try:
- normalized_locale = locale.getlocale()
- except ValueError:
- yield new_locale
+ normalized_locale = locale.getlocale()
+ if com._all_not_none(*normalized_locale):
+ yield '.'.join(normalized_locale)
else:
- if com._all_not_none(*normalized_locale):
- yield '.'.join(normalized_locale)
- else:
- yield new_locale
+ yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
- Check to see if we can set a locale without raising an Exception.
+ Check to see if we can set a locale, and subsequently get the locale,
+ without raising an Exception.
Parameters
----------
@@ -538,7 +534,8 @@ def can_set_locale(lc, lc_var=locale.LC_ALL):
try:
with set_locale(lc, lc_var=lc_var):
pass
- except locale.Error: # horrible name for a Exception subclass
+ except (ValueError,
+ locale.Error): # horrible name for a Exception subclass
return False
else:
return True
| Check that we can also get the locale, after setting it, without raising an Exception.
- [x] closes #22129
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22213 | 2018-08-06T10:52:14Z | 2018-08-13T01:14:36Z | 2018-08-13T01:14:36Z | 2018-09-10T11:02:03Z |
API/CLN: Have toplevel pd.pivot mirror pivot instead of pivot_simple | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cb251d4648925..7ae2107626973 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5346,8 +5346,7 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
# ----------------------------------------------------------------------
# Data reshaping
- def pivot(self, index=None, columns=None, values=None):
- """
+ _shared_docs['pivot'] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
@@ -5357,7 +5356,7 @@ def pivot(self, index=None, columns=None, values=None):
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
- ----------
+ ----------%s
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
@@ -5449,7 +5448,11 @@ def pivot(self, index=None, columns=None, values=None):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
- from pandas.core.reshape.reshape import pivot
+
+ @Substitution('')
+ @Appender(_shared_docs['pivot'])
+ def pivot(self, index=None, columns=None, values=None):
+ from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
diff --git a/pandas/core/reshape/api.py b/pandas/core/reshape/api.py
index 11d69359f5c65..7ac1c0cb52fe3 100644
--- a/pandas/core/reshape/api.py
+++ b/pandas/core/reshape/api.py
@@ -2,7 +2,7 @@
from pandas.core.reshape.concat import concat
from pandas.core.reshape.melt import melt, lreshape, wide_to_long
-from pandas.core.reshape.reshape import pivot_simple as pivot, get_dummies
+from pandas.core.reshape.reshape import get_dummies
from pandas.core.reshape.merge import merge, merge_ordered, merge_asof
-from pandas.core.reshape.pivot import pivot_table, crosstab
+from pandas.core.reshape.pivot import pivot_table, pivot, crosstab
from pandas.core.reshape.tile import cut, qcut
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 0d1caa3d57d73..b525dddeb1ba5 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -10,7 +10,7 @@
from pandas.core.series import Series
from pandas.core.groupby import Grouper
from pandas.core.reshape.util import cartesian_product
-from pandas.core.index import Index, _get_objs_combined_axis
+from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
from pandas.compat import range, lrange, zip
from pandas import compat
import pandas.core.common as com
@@ -369,6 +369,30 @@ def _convert_by(by):
return by
+@Substitution('\ndata : DataFrame')
+@Appender(_shared_docs['pivot'], indents=1)
+def pivot(data, index=None, columns=None, values=None):
+ if values is None:
+ cols = [columns] if index is None else [index, columns]
+ append = index is None
+ indexed = data.set_index(cols, append=append)
+ else:
+ if index is None:
+ index = data.index
+ else:
+ index = data[index]
+ index = MultiIndex.from_arrays([index, data[columns]])
+
+ if is_list_like(values) and not isinstance(values, tuple):
+ # Exclude tuple because it is seen as a single column name
+ indexed = data._constructor(data[values].values, index=index,
+ columns=values)
+ else:
+ indexed = data._constructor_sliced(data[values].values,
+ index=index)
+ return indexed.unstack(columns)
+
+
def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, margins_name='All', dropna=True,
normalize=False):
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index bd5ce4897e9da..50f6e310705d7 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -383,97 +383,6 @@ def _unstack_multiple(data, clocs, fill_value=None):
return unstacked
-def pivot(self, index=None, columns=None, values=None):
- """
- See DataFrame.pivot
- """
- if values is None:
- cols = [columns] if index is None else [index, columns]
- append = index is None
- indexed = self.set_index(cols, append=append)
- else:
- if index is None:
- index = self.index
- else:
- index = self[index]
- index = MultiIndex.from_arrays([index, self[columns]])
-
- if is_list_like(values) and not isinstance(values, tuple):
- # Exclude tuple because it is seen as a single column name
- indexed = self._constructor(self[values].values, index=index,
- columns=values)
- else:
- indexed = self._constructor_sliced(self[values].values,
- index=index)
- return indexed.unstack(columns)
-
-
-def pivot_simple(index, columns, values):
- """
- Produce 'pivot' table based on 3 columns of this DataFrame.
- Uses unique values from index / columns and fills with values.
-
- Parameters
- ----------
- index : ndarray
- Labels to use to make new frame's index
- columns : ndarray
- Labels to use to make new frame's columns
- values : ndarray
- Values to use for populating new frame's values
-
- Notes
- -----
- Obviously, all 3 of the input arguments must have the same length
-
- Returns
- -------
- DataFrame
-
- See also
- --------
- DataFrame.pivot_table : generalization of pivot that can handle
- duplicate values for one index/column pair
- """
- if (len(index) != len(columns)) or (len(columns) != len(values)):
- raise AssertionError('Length of index, columns, and values must be the'
- ' same')
-
- if len(index) == 0:
- return DataFrame(index=[])
-
- hindex = MultiIndex.from_arrays([index, columns])
- series = Series(values.ravel(), index=hindex)
- series = series.sort_index(level=0)
- return series.unstack()
-
-
-def _slow_pivot(index, columns, values):
- """
- Produce 'pivot' table based on 3 columns of this DataFrame.
- Uses unique values from index / columns and fills with values.
-
- Parameters
- ----------
- index : string or object
- Column name to use to make new frame's index
- columns : string or object
- Column name to use to make new frame's columns
- values : string or object
- Column name to use for populating new frame's values
-
- Could benefit from some Cython here.
- """
- tree = {}
- for i, (idx, col) in enumerate(zip(index, columns)):
- if col not in tree:
- tree[col] = {}
- branch = tree[col]
- branch[idx] = values[i]
-
- return DataFrame(tree)
-
-
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index e3d5880eebd48..e66758f58b1d4 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -301,13 +301,17 @@ def test_pivot_multi_functions(self):
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
- def test_pivot_index_with_nan(self):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
- result = df.pivot('a', 'b', 'c')
+ if method:
+ result = df.pivot('a', 'b', 'c')
+ else:
+ result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
@@ -322,15 +326,23 @@ def test_pivot_index_with_nan(self):
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
- pv = df.pivot('a', 'b', 'c')
+ if method:
+ pv = df.pivot('a', 'b', 'c')
+ else:
+ pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
- tm.assert_frame_equal(df.pivot('b', 'a', 'c'), pv.T)
+ if method:
+ result = df.pivot('b', 'a', 'c')
+ else:
+ result = pd.pivot(df, 'b', 'a', 'c')
+ tm.assert_frame_equal(result, pv.T)
- def test_pivot_with_tz(self):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
@@ -358,7 +370,10 @@ def test_pivot_with_tz(self):
tz='US/Pacific'),
columns=exp_col)
- pv = df.pivot(index='dt1', columns='dt2')
+ if method:
+ pv = df.pivot(index='dt1', columns='dt2')
+ else:
+ pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
@@ -371,10 +386,14 @@ def test_pivot_with_tz(self):
name='dt2',
tz='Asia/Tokyo'))
- pv = df.pivot(index='dt1', columns='dt2', values='data1')
+ if method:
+ pv = df.pivot(index='dt1', columns='dt2', values='data1')
+ else:
+ pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
- def test_pivot_periods(self):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
@@ -394,8 +413,10 @@ def test_pivot_periods(self):
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
-
- pv = df.pivot(index='p1', columns='p2')
+ if method:
+ pv = df.pivot(index='p1', columns='p2')
+ else:
+ pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
@@ -403,22 +424,28 @@ def test_pivot_periods(self):
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
-
- pv = df.pivot(index='p1', columns='p2', values='data1')
+ if method:
+ pv = df.pivot(index='p1', columns='p2', values='data1')
+ else:
+ pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
- def test_pivot_with_list_like_values(self, values):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
- result = df.pivot(index='foo', columns='bar', values=values)
+ if method:
+ result = df.pivot(index='foo', columns='bar', values=values)
+ else:
+ result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
@@ -434,14 +461,18 @@ def test_pivot_with_list_like_values(self, values):
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
- def test_pivot_with_list_like_values_nans(self, values):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
- result = df.pivot(index='zoo', columns='foo', values=values)
+ if method:
+ result = df.pivot(index='zoo', columns='foo', values=values)
+ else:
+ result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
@@ -460,7 +491,8 @@ def test_pivot_with_list_like_values_nans(self, values):
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966',
strict=True)
- def test_pivot_with_multiindex(self):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
@@ -472,8 +504,15 @@ def test_pivot_with_multiindex(self):
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
- result = df.pivot(index=('bar', 'first'), columns=('bar', 'second'),
- values=('baz', 'first'))
+ if method:
+ result = df.pivot(index=('bar', 'first'),
+ columns=('bar', 'second'),
+ values=('baz', 'first'))
+ else:
+ result = pd.pivot(df,
+ index=('bar', 'first'),
+ columns=('bar', 'second'),
+ values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
@@ -481,7 +520,8 @@ def test_pivot_with_multiindex(self):
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
- def test_pivot_with_tuple_of_values(self):
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
@@ -489,7 +529,10 @@ def test_pivot_with_tuple_of_values(self):
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
- df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
+ if method:
+ df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
+ else:
+ pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 2f8bc228cf86e..b968c52ce3dfd 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -10,7 +10,7 @@
from pandas.core.dtypes.common import is_float_dtype
from pandas import (Series, DataFrame, Index, date_range, isna, notna,
- pivot, MultiIndex)
+ MultiIndex)
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
@@ -2676,30 +2676,6 @@ def test_join(self):
pytest.raises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
- def test_pivot(self):
- with catch_warnings(record=True):
- from pandas.core.reshape.reshape import _slow_pivot
-
- one, two, three = (np.array([1, 2, 3, 4, 5]),
- np.array(['a', 'b', 'c', 'd', 'e']),
- np.array([1, 2, 3, 5, 4.]))
- df = pivot(one, two, three)
- assert df['a'][1] == 1
- assert df['b'][2] == 2
- assert df['c'][3] == 3
- assert df['d'][4] == 5
- assert df['e'][5] == 4
- assert_frame_equal(df, _slow_pivot(one, two, three))
-
- # weird overlap, TODO: test?
- a, b, c = (np.array([1, 2, 3, 4, 4]),
- np.array(['a', 'a', 'a', 'a', 'a']),
- np.array([1., 2., 3., 4., 5.]))
- pytest.raises(Exception, pivot, a, b, c)
-
- # corner case, empty
- df = pivot(np.array([]), np.array([]), np.array([]))
-
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
| - [x] closes #22116
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- Moved `pivot` from `pandas/core/reshape/reshape.py` to `pandas/core/reshape/pivot.py`
- Moved `pivot_simple` and `_slow_pivot` from `pandas/core/reshape/reshape.py` to `pandas/core/reshape/pivot.py` (only used by one panel test)
- Remapped top level `pd.pivot` to use `pivot` instead of `pivot_simple`
- Added some additional tests for `pd.pivot`
| https://api.github.com/repos/pandas-dev/pandas/pulls/22209 | 2018-08-06T03:20:55Z | 2018-08-08T10:41:19Z | 2018-08-08T10:41:19Z | 2018-08-16T14:39:07Z |
BUG: treat nan-objects the same way float64-nans are treated - all na… | diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index a5ccf5c32b876..34a8d552304d1 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -38,6 +38,43 @@ def time_isin(self, dtypes):
self.s.isin(self.values)
+class IsInForObjects(object):
+
+ def setup(self):
+ self.s_nans = Series(np.full(10**4, np.nan)).astype(np.object)
+ self.vals_nans = np.full(10**4, np.nan).astype(np.object)
+ self.s_short = Series(np.arange(2)).astype(np.object)
+ self.s_long = Series(np.arange(10**5)).astype(np.object)
+ self.vals_short = np.arange(2).astype(np.object)
+ self.vals_long = np.arange(10**5).astype(np.object)
+ # because of nans floats are special:
+ self.s_long_floats = Series(np.arange(10**5,
+ dtype=np.float)).astype(np.object)
+ self.vals_long_floats = np.arange(10**5,
+ dtype=np.float).astype(np.object)
+
+ def time_isin_nans(self):
+ # if nan-objects are different objects,
+ # this has the potential to trigger O(n^2) running time
+ self.s_nans.isin(self.vals_nans)
+
+ def time_isin_short_series_long_values(self):
+ # running time dominated by the preprocessing
+ self.s_short.isin(self.vals_long)
+
+ def time_isin_long_series_short_values(self):
+ # running time dominated by look-up
+ self.s_long.isin(self.vals_short)
+
+ def time_isin_long_series_long_values(self):
+ # no dominating part
+ self.s_long.isin(self.vals_long)
+
+ def time_isin_long_series_long_values_floats(self):
+ # no dominating part
+ self.s_long_floats.isin(self.vals_long_floats)
+
+
class NSort(object):
goal_time = 0.2
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 42e286f487a7d..a513c2bd03ac7 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -512,6 +512,7 @@ Missing
- Bug in :func:`DataFrame.fillna` where a ``ValueError`` would raise when one column contained a ``datetime64[ns, tz]`` dtype (:issue:`15522`)
- Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`)
+- :func:`Series.isin` now treats all nans as equal also for `np.object`-dtype. This behavior is consistent with the behavior for float64 (:issue:`22119`)
MultiIndex
^^^^^^^^^^
diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h
index e9fb49e8a5e42..45a93051f78d3 100644
--- a/pandas/_libs/src/klib/khash_python.h
+++ b/pandas/_libs/src/klib/khash_python.h
@@ -47,10 +47,19 @@ int PANDAS_INLINE pyobject_cmp(PyObject* a, PyObject* b) {
PyErr_Clear();
return 0;
}
+ if (result == 0) { // still could be two NaNs
+ return PyFloat_CheckExact(a) &&
+ PyFloat_CheckExact(b) &&
+ Py_IS_NAN(PyFloat_AS_DOUBLE(a)) &&
+ Py_IS_NAN(PyFloat_AS_DOUBLE(b));
+ }
return result;
}
-
+// For PyObject_Hash holds:
+// hash(0.0) == 0 == hash(-0.0)
+// hash(X) == 0 if X is a NaN-value
+// so it is OK to use it directly
#define kh_python_hash_func(key) (PyObject_Hash(key))
#define kh_python_hash_equal(a, b) (pyobject_cmp(a, b))
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 754703dfc4bee..8ed8d2014f5bc 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -13,7 +13,8 @@
from pandas.tests.indexes.common import Base
from pandas.compat import (range, lrange, lzip, u,
- text_type, zip, PY3, PY35, PY36, PYPY, StringIO)
+ text_type, zip, PY3, PY35, PY36, StringIO)
+import math
import operator
import numpy as np
@@ -1661,9 +1662,13 @@ def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
- if PYPY and nulls_fixture is np.nan: # np.nan is float('nan') on PyPy
+ # all nans are the same
+ if (isinstance(nulls_fixture, float) and
+ isinstance(nulls_fixture2, float) and
+ math.isnan(nulls_fixture) and
+ math.isnan(nulls_fixture2)):
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
- [float('nan')]), np.array([False, True]))
+ [nulls_fixture2]), np.array([False, True]))
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 7ce2aaf7d7fbb..d8d98318b4c72 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -623,6 +623,14 @@ def test_empty(self, empty):
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
+ def test_different_nan_objects(self):
+ # GH 22119
+ comps = np.array(['nan', np.nan * 1j, float('nan')], dtype=np.object)
+ vals = np.array([float('nan')], dtype=np.object)
+ expected = np.array([False, False, True])
+ result = algos.isin(comps, vals)
+ tm.assert_numpy_array_equal(expected, result)
+
class TestValueCounts(object):
| …ns are from the same equivalence class (GH22119)
- [x] closes #22119
- [x] closes #22148
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22207 | 2018-08-05T21:03:42Z | 2018-08-09T10:40:05Z | 2018-08-09T10:40:04Z | 2018-08-09T19:34:12Z |
STYLE/LINT: Set literals | diff --git a/ci/lint.sh b/ci/lint.sh
index ec99e1e559d6e..0d0ddadafbea9 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -13,7 +13,6 @@ if [ "$LINT" ]; then
#E731, # do not assign a lambda expression, use a def
#E741, # do not use variables named 'l', 'O', or 'I'
#W503, # line break before binary operator
- #C405, # Unnecessary (list/tuple) literal - rewrite as a set literal.
#C406, # Unnecessary (list/tuple) literal - rewrite as a dict literal.
#C408, # Unnecessary (dict/list/tuple) call - rewrite as a literal.
#C409, # Unnecessary (list/tuple) passed to tuple() - (remove the outer call to tuple()/rewrite as a tuple literal).
@@ -21,35 +20,35 @@ if [ "$LINT" ]; then
# pandas/_libs/src is C code, so no need to search there.
echo "Linting *.py"
- flake8 pandas --filename=*.py --exclude pandas/_libs/src --ignore=C405,C406,C408,C409,C410,E402,E731,E741,W503
+ flake8 pandas --filename=*.py --exclude pandas/_libs/src --ignore=C406,C408,C409,C410,E402,E731,E741,W503
if [ $? -ne "0" ]; then
RET=1
fi
echo "Linting *.py DONE"
echo "Linting setup.py"
- flake8 setup.py --ignore=C405,C406,C408,C409,C410,E402,E731,E741,W503
+ flake8 setup.py --ignore=C406,C408,C409,C410,E402,E731,E741,W503
if [ $? -ne "0" ]; then
RET=1
fi
echo "Linting setup.py DONE"
echo "Linting asv_bench/benchmarks/"
- flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/*.py --ignore=F811,C405,C406,C408,C409,C410
+ flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/*.py --ignore=F811,C406,C408,C409,C410
if [ $? -ne "0" ]; then
RET=1
fi
echo "Linting asv_bench/benchmarks/*.py DONE"
echo "Linting scripts/*.py"
- flake8 scripts --filename=*.py --ignore=C405,C406,C408,C409,C410,E402,E731,E741,W503
+ flake8 scripts --filename=*.py --ignore=C406,C408,C409,C410,E402,E731,E741,W503
if [ $? -ne "0" ]; then
RET=1
fi
echo "Linting scripts/*.py DONE"
echo "Linting doc scripts"
- flake8 doc/make.py doc/source/conf.py --ignore=C405,C406,C408,C409,C410,E402,E731,E741,W503
+ flake8 doc/make.py doc/source/conf.py --ignore=C406,C408,C409,C410,E402,E731,E741,W503
if [ $? -ne "0" ]; then
RET=1
fi
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 148018ece20e2..72a0baa763c8a 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -260,8 +260,8 @@ def maybe_convert_bool(ndarray[object] arr,
result = np.empty(n, dtype=np.uint8)
# the defaults
- true_vals = set(('True', 'TRUE', 'true'))
- false_vals = set(('False', 'FALSE', 'false'))
+ true_vals = {'True', 'TRUE', 'true'}
+ false_vals = {'False', 'FALSE', 'false'}
if true_values is not None:
true_vals = true_vals | set(true_values)
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
index 5c8efa8c03712..7e9e8b720872d 100644
--- a/pandas/_libs/tslibs/frequencies.pyx
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -124,7 +124,7 @@ _lite_rule_alias = {
'us': 'U',
'ns': 'N'}
-_dont_uppercase = set(('MS', 'ms'))
+_dont_uppercase = {'MS', 'ms'}
# ----------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 2fe8fab2e2e19..20b43f9d5644b 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -23,7 +23,7 @@ from util cimport (get_nat,
# ----------------------------------------------------------------------
# Constants
-nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'])
+nat_strings = {'NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'}
cdef int64_t NPY_NAT = get_nat()
iNaT = NPY_NAT # python-visible constant
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 3ba2270a851d5..2efe506d2c154 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -252,12 +252,10 @@ def _validate_business_time(t_input):
# ---------------------------------------------------------------------
# Constructor Helpers
-relativedelta_kwds = set([
- 'years', 'months', 'weeks', 'days',
- 'year', 'month', 'day', 'weekday',
- 'hour', 'minute', 'second', 'microsecond',
- 'nanosecond', 'nanoseconds',
- 'hours', 'minutes', 'seconds', 'microseconds'])
+relativedelta_kwds = {'years', 'months', 'weeks', 'days', 'year', 'month',
+ 'day', 'weekday', 'hour', 'minute', 'second',
+ 'microsecond', 'nanosecond', 'nanoseconds', 'hours',
+ 'minutes', 'seconds', 'microseconds'}
def _determine_offset(kwds):
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 811f0d25c3838..f57db1076e6e1 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1973,6 +1973,6 @@ def _validate_end_alias(how):
'START': 'S', 'FINISH': 'E',
'BEGIN': 'S', 'END': 'E'}
how = how_dict.get(str(how).upper())
- if how not in set(['S', 'E']):
+ if how not in {'S', 'E'}:
raise ValueError('How must be one of S or E')
return how
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 76614454e5a10..90df596b98296 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -25,7 +25,7 @@
from . import ExtensionArray, Categorical
-_VALID_CLOSED = set(['left', 'right', 'both', 'neither'])
+_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_interval_shared_docs = {}
_shared_docs_kwargs = dict(
klass='IntervalArray',
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index ac552e7b80de3..c12056a3ee78c 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -24,8 +24,8 @@
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
- 'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
- 'where': set(['int64', 'float64', 'bool'])
+ 'evaluate': {'int64', 'int32', 'float64', 'float32', 'bool'},
+ 'where': {'int64', 'float64', 'bool'}
}
# the minimum prod shape that we will use numexpr
@@ -81,7 +81,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
- dtypes |= set([o.dtype.name])
+ dtypes |= {o.dtype.name}
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 5768fd361c3db..c1aab961dcc9f 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -188,8 +188,8 @@ def is_nonempty(x):
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:
- if (not len(typs - set(['i', 'u', 'f'])) or
- not len(typs - set(['bool', 'i', 'u']))):
+ if (not len(typs - {'i', 'u', 'f'}) or
+ not len(typs - {'bool', 'i', 'u'})):
# let numpy coerce
pass
else:
@@ -600,7 +600,7 @@ def convert_sparse(x, axis):
to_concat = [convert_sparse(x, axis) for x in to_concat]
result = np.concatenate(to_concat, axis=axis)
- if not len(typs - set(['sparse', 'f', 'i'])):
+ if not len(typs - {'sparse', 'f', 'i'}):
# sparsify if inputs are sparse and dense numerics
# first sparse input's fill_value and SparseIndex is used
result = SparseArray(result.ravel(), fill_value=fill_values[0],
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 36cdfbd3b3479..35d4a024a4e6c 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -481,7 +481,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
if key.key is None:
return grouper, [], obj
else:
- return grouper, set([key.key]), obj
+ return grouper, {key.key}, obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2a191ef76473b..90d87e6f1cc23 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -238,7 +238,7 @@ class Index(IndexOpsMixin, PandasObject):
_engine_type = libindex.ObjectEngine
- _accessors = set(['str'])
+ _accessors = {'str'}
str = CachedAccessor("str", StringMethods)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 838b12468e85e..364eea8fb8a3a 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -44,7 +44,7 @@
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)
-_VALID_CLOSED = set(['left', 'right', 'both', 'neither'])
+_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index fd27e3ba650ea..81d1e83ee6870 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -716,7 +716,7 @@ def dropna(self, axis=0, how='any', inplace=False):
values = self.values
mask = notna(values)
- for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
+ for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 32251430deec7..08bfec89a22a8 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1199,7 +1199,7 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean',
freq = to_offset(freq)
- end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
+ end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'}
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 21dea15772cc0..b84179875db1f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -163,7 +163,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
Copy input data
"""
_metadata = ['name']
- _accessors = set(['dt', 'cat', 'str'])
+ _accessors = {'dt', 'cat', 'str'}
_deprecations = generic.NDFrame._deprecations | frozenset(
['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
'from_csv', 'valid'])
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 39ecf7f49bc2e..6deec52811aff 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1082,7 +1082,7 @@ def str_get_dummies(arr, sep='|'):
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
- tags = sorted(tags - set([""]))
+ tags = sorted(tags - {""})
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 17dda903cdadb..69cb9ed46419c 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -23,10 +23,9 @@
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
-_NA_VALUES = set([
- '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A',
- 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', ''
-])
+_NA_VALUES = {'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A',
+ 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan',
+ '-nan', ''}
if compat.PY3:
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 6fabd2573a7b4..9faac6cd09218 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -268,7 +268,7 @@ def _save_header(self):
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
- if encoded_labels and set(encoded_labels) != set(['']):
+ if encoded_labels and set(encoded_labels) != {''}:
encoded_labels.extend([''] * len(columns))
writer.writerow(encoded_labels)
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index be6ff898ec2b7..e0074e2cf3aef 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -338,7 +338,7 @@ def f(t):
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
- used_legends = set([])
+ used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
@@ -518,7 +518,7 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
else:
df = frame[cols]
- used_legends = set([])
+ used_legends = set()
ncols = len(df.columns)
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 5f1f6dc5bca87..dc330666b4b6c 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -66,7 +66,7 @@ def __getitem__(self):
"ll",
[
[], [1], (1, ), (1, 2), {'a': 1},
- set([1, 'a']), Series([1]),
+ {1, 'a'}, Series([1]),
Series([]), Series(['a']).str,
np.array([2])])
def test_is_list_like_passes(ll):
@@ -97,7 +97,7 @@ class DtypeList(list):
@pytest.mark.parametrize('inner', [
- [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
+ [], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
@@ -293,7 +293,7 @@ def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
- na_values = set(['', 'NULL', 'nan'])
+ na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
@@ -332,7 +332,7 @@ def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
- nan_values = set([-999, -999.0])
+ nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
@@ -385,7 +385,7 @@ def test_convert_numeric_uint64_nan(self, coerce, arr):
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
- na_values = set([2**63])
+ na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index c5d771f52b6ac..3134686c2a2d9 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -274,7 +274,7 @@ def _check2d(df, expected, method='average', axis=0):
result = df.rank(method=method, axis=axis)
assert_frame_equal(result, exp_df)
- disabled = set([(object, 'first')])
+ disabled = {(object, 'first')}
if (dtype, method) in disabled:
return
frame = df if dtype is None else df.astype(dtype)
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index a77c170221bea..cbf1bdbce9574 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -28,9 +28,9 @@ def _axify(obj, key, axis):
class Base(object):
""" indexing comprehensive base class """
- _objs = set(['series', 'frame', 'panel'])
- _typs = set(['ints', 'uints', 'labels', 'mixed',
- 'ts', 'floats', 'empty', 'ts_rev', 'multi'])
+ _objs = {'series', 'frame', 'panel'}
+ _typs = {'ints', 'uints', 'labels', 'mixed', 'ts', 'floats', 'empty',
+ 'ts_rev', 'multi'}
def setup_method(self, method):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 3218742aa7636..76bdf141ec828 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -244,7 +244,7 @@ def test_repr_obeys_max_seq_limit(self):
assert len(printing.pprint_thing(lrange(1000))) < 100
def test_repr_set(self):
- assert printing.pprint_thing(set([1])) == '{1}'
+ assert printing.pprint_thing({1}) == '{1}'
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather then
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index d2c3f82e95c4d..9c68770e06b02 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -69,9 +69,9 @@ def test_non_string_na_values(self):
tm.assert_frame_equal(out, expected)
def test_default_na_values(self):
- _NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
- '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null',
- 'NaN', 'nan', '-NaN', '-nan', '#N/A N/A', ''])
+ _NA_VALUES = {'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A',
+ 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', 'nan',
+ '-NaN', '-nan', '#N/A N/A', ''}
assert _NA_VALUES == com._NA_VALUES
nv = len(_NA_VALUES)
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index 919b357f14236..1bf055854de88 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -455,7 +455,7 @@ def test_read_with_parse_dates_invalid_type(self):
self.read_csv, StringIO(data),
parse_dates=np.array([4, 5]))
tm.assert_raises_regex(TypeError, errmsg, self.read_csv,
- StringIO(data), parse_dates=set([1, 3, 3]))
+ StringIO(data), parse_dates={1, 3, 3})
def test_parse_dates_empty_string(self):
# see gh-2263
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py
index 584711528e9cb..db01c20a56e9f 100644
--- a/pandas/tests/io/parser/usecols.py
+++ b/pandas/tests/io/parser/usecols.py
@@ -413,7 +413,7 @@ def test_empty_usecols(self):
# should not raise
data = 'a,b,c\n1,2,3\n4,5,6'
expected = DataFrame()
- result = self.read_csv(StringIO(data), usecols=set([]))
+ result = self.read_csv(StringIO(data), usecols=set())
tm.assert_frame_equal(result, expected)
def test_np_array_usecols(self):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index b78c4f27d8c3f..e08899a03d2d7 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -170,8 +170,8 @@ def test_skiprows_list(self):
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
- df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=set([1, 2]))
- df2 = self.read_html(self.spam_data, 'Unit', skiprows=set([2, 1]))
+ df1 = self.read_html(self.spam_data, '.*Water.*', skiprows={1, 2})
+ df2 = self.read_html(self.spam_data, 'Unit', skiprows={2, 1})
assert_framelist_equal(df1, df2)
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index db8306d6dcb77..99c3c659e9b4d 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -369,7 +369,7 @@ def test_keys(self):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
assert len(store) == 5
- expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
+ expected = {'/a', '/b', '/c', '/d', '/foo/bar'}
assert set(store.keys()) == expected
assert set(store) == expected
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 77dd06cccc532..824e5a2b23df3 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1007,7 +1007,7 @@ def test_query_by_text_obj(self):
iris_df = sql.read_sql(name_text, self.conn, params={
'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
- assert all_names == set(['Iris-versicolor'])
+ assert all_names == {'Iris-versicolor'}
def test_query_by_select_obj(self):
# WIP : GH10846
@@ -1018,7 +1018,7 @@ def test_query_by_select_obj(self):
iris_df = sql.read_sql(name_select, self.conn,
params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
- assert all_names == set(['Iris-setosa'])
+ assert all_names == {'Iris-setosa'}
class _EngineToConnMixin(object):
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index c1891430683da..f8420b302836e 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -135,19 +135,19 @@ def test_concat_empty_series_dtypes_roundtrips(self):
Series(dtype=dtype)]).dtype == dtype
def int_result_type(dtype, dtype2):
- typs = set([dtype.kind, dtype2.kind])
- if not len(typs - set(['i', 'u', 'b'])) and (dtype.kind == 'i' or
- dtype2.kind == 'i'):
+ typs = {dtype.kind, dtype2.kind}
+ if not len(typs - {'i', 'u', 'b'}) and (dtype.kind == 'i' or
+ dtype2.kind == 'i'):
return 'i'
- elif not len(typs - set(['u', 'b'])) and (dtype.kind == 'u' or
- dtype2.kind == 'u'):
+ elif not len(typs - {'u', 'b'}) and (dtype.kind == 'u' or
+ dtype2.kind == 'u'):
return 'u'
return None
def float_result_type(dtype, dtype2):
- typs = set([dtype.kind, dtype2.kind])
- if not len(typs - set(['f', 'i', 'u'])) and (dtype.kind == 'f' or
- dtype2.kind == 'f'):
+ typs = {dtype.kind, dtype2.kind}
+ if not len(typs - {'f', 'i', 'u'}) and (dtype.kind == 'f' or
+ dtype2.kind == 'f'):
return 'f'
return None
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 145682e5be863..d2fbd69a2a08f 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -952,7 +952,7 @@ def test_constructor_dict_of_tuples(self):
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
- values = set([1, 2, 3, 4, 5])
+ values = {1, 2, 3, 4, 5}
pytest.raises(TypeError, Series, values)
values = frozenset(values)
pytest.raises(TypeError, Series, values)
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index 42f2d45df2def..e9382700af989 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -261,7 +261,7 @@ def _check(s, expected, method='average'):
tm.assert_series_equal(result, Series(expected))
dtypes = [None, object]
- disabled = set([(object, 'first')])
+ disabled = {(object, 'first')}
results = self.results
for method, dtype in product(results, dtypes):
@@ -279,7 +279,7 @@ def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending):
dtypes = [('object', None, Infinity(), NegInfinity()),
('float64', np.nan, np.inf, -np.inf)]
chunk = 3
- disabled = set([('object', 'first')])
+ disabled = {('object', 'first')}
def _check(s, method, na_option, ascending):
exp_ranks = {
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 58f2f41f3681c..214352f862833 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -548,7 +548,7 @@ def test_basic(self):
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(Series([1, 2]), set([1]))
+ result = algos.isin(Series([1, 2]), {1})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@@ -560,7 +560,7 @@ def test_basic(self):
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(Series(['a', 'b']), set(['a']))
+ result = algos.isin(Series(['a', 'b']), {'a'})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 56e00fa8af23d..2de7585ccedbd 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -108,7 +108,7 @@ def test_integer_arithmetic(self):
check_dtype=True)
def run_binary(self, df, other, assert_func, test_flex=False,
- numexpr_ops=set(['gt', 'lt', 'ge', 'le', 'eq', 'ne'])):
+ numexpr_ops={'gt', 'lt', 'ge', 'le', 'eq', 'ne'}):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index fc3b13a37fcdb..ec6d83062c8b0 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -1992,7 +1992,7 @@ def no_nans(x):
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
- for min_periods in set([0, 1, 2, 3, 4, window]):
+ for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 9d41401a7eefc..c74b7454a67e3 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -274,9 +274,8 @@ def apply_index(self, i):
"implementation".format(
name=self.__class__.__name__))
kwds = self.kwds
- relativedelta_fast = set(['years', 'months', 'weeks',
- 'days', 'hours', 'minutes',
- 'seconds', 'microseconds'])
+ relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours',
+ 'minutes', 'seconds', 'microseconds'}
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(kwds).issubset(relativedelta_fast)):
@@ -318,7 +317,7 @@ def isAnchored(self):
# set of attributes on each object rather than the existing behavior of
# iterating over internal ``__dict__``
def _repr_attrs(self):
- exclude = set(['n', 'inc', 'normalize'])
+ exclude = {'n', 'inc', 'normalize'}
attrs = []
for attr in sorted(self.__dict__):
if attr.startswith('_') or attr == 'kwds':
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 2225daf10d90f..afc928ddfbb84 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -909,8 +909,8 @@ def repr_class(x):
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
- types = set([type(left).__name__, type(right).__name__])
- if len(types - set(['Int64Index', 'RangeIndex'])):
+ types = {type(left).__name__, type(right).__name__}
+ if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py
index 8f0c554b8aa9d..e144f5187ac9f 100755
--- a/scripts/find_commits_touching_func.py
+++ b/scripts/find_commits_touching_func.py
@@ -135,7 +135,7 @@ def search(defname, head_commit="HEAD"):
# seed with hits from q
allhits = set(get_hits(defname, files=files))
- q = set([HEAD])
+ q = {HEAD}
try:
while q:
h = q.pop()
| xref #22122
Use `{1, 2, 3}` instead of `set([1, 2, 3])` and enforce with a linting rule.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22202 | 2018-08-05T07:08:38Z | 2018-08-06T03:00:18Z | 2018-08-06T03:00:18Z | 2018-08-06T04:33:55Z |
CLN: Old timezone issues | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 5c15c7b6a742f..f419e2d06239c 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -561,6 +561,10 @@ Timezones
- Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`)
- Bug in :class:`DatetimeIndex` comparisons failing to raise ``TypeError`` when comparing timezone-aware ``DatetimeIndex`` against ``np.datetime64`` (:issue:`22074`)
- Bug in ``DataFrame`` assignment with a timezone-aware scalar (:issue:`19843`)
+- Bug when constructing a :class:`DatetimeIndex` with :class:`Timestamp`s constructed with the ``replace`` method across DST (:issue:`18785`)
+- Bug when setting a new value with :meth:`DataFrame.loc` with a :class:`DatetimeIndex` with a DST transition (:issue:`18308`, :issue:`20724`)
+- Bug in :meth:`DatetimeIndex.unique` that did not re-localize tz-aware dates correctly (:issue:`21737`)
+- Bug when indexing a :class:`Series` with a DST transition (:issue:`21846`)
Offsets
^^^^^^^
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index 5653943c37e37..b6f27cbdd1b89 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -503,6 +503,24 @@ def test_construction_int_rountrip(self, tz_naive_fixture):
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
+ def test_construction_from_replaced_timestamps_with_dst(self):
+ # GH 18785
+ index = pd.date_range(pd.Timestamp(2000, 1, 1),
+ pd.Timestamp(2005, 1, 1),
+ freq='MS', tz='Australia/Melbourne')
+ test = pd.DataFrame({'data': range(len(index))}, index=index)
+ test = test.resample('Y').mean()
+ result = pd.DatetimeIndex([x.replace(month=6, day=1)
+ for x in test.index])
+ expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
+ '2001-06-01 00:00:00',
+ '2002-06-01 00:00:00',
+ '2003-06-01 00:00:00',
+ '2004-06-01 00:00:00',
+ '2005-06-01 00:00:00'],
+ tz='Australia/Melbourne')
+ tm.assert_index_equal(result, expected)
+
class TestTimeSeries(object):
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 2adf09924a509..db3de0ceced0c 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -394,3 +394,6 @@ def test_factorize_dst(self):
def test_unique(self, arr, expected):
result = arr.unique()
tm.assert_index_equal(result, expected)
+ # GH 21737
+ # Ensure the underlying data is consistent
+ assert result[0] == expected[0]
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 751372380d262..df59390475da8 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -1,5 +1,9 @@
+from datetime import datetime, timedelta
+
import numpy as np
import pandas as pd
+from dateutil import tz
+
from pandas import date_range, Index, DataFrame, Series, Timestamp
from pandas.util import testing as tm
@@ -266,3 +270,46 @@ def test_nanosecond_getitem_setitem_with_tz(self):
result.loc[df.index[0], 'a'] = -1
expected = DataFrame(-1, index=index, columns=['a'])
tm.assert_frame_equal(result, expected)
+
+ def test_loc_getitem_across_dst(self):
+ # GH 21846
+ idx = pd.date_range('2017-10-29 01:30:00',
+ tz='Europe/Berlin', periods=5, freq='30 min')
+ series2 = pd.Series([0, 1, 2, 3, 4],
+ index=idx)
+
+ t_1 = pd.Timestamp('2017-10-29 02:30:00+02:00', tz='Europe/Berlin',
+ freq='30min')
+ t_2 = pd.Timestamp('2017-10-29 02:00:00+01:00', tz='Europe/Berlin',
+ freq='30min')
+ result = series2.loc[t_1:t_2]
+ expected = pd.Series([2, 3], index=idx[2:4])
+ tm.assert_series_equal(result, expected)
+
+ result = series2[t_1]
+ expected = 2
+ assert result == expected
+
+ def test_loc_incremental_setitem_with_dst(self):
+ # GH 20724
+ base = datetime(2015, 11, 1, tzinfo=tz.gettz("US/Pacific"))
+ idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
+ result = pd.Series([0], index=[idxs[0]])
+ for ts in idxs:
+ result.loc[ts] = 1
+ expected = pd.Series(1, index=idxs)
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_with_existing_dst(self):
+ # GH 18308
+ start = pd.Timestamp('2017-10-29 00:00:00+0200', tz='Europe/Madrid')
+ end = pd.Timestamp('2017-10-29 03:00:00+0100', tz='Europe/Madrid')
+ ts = pd.Timestamp('2016-10-10 03:00:00', tz='Europe/Madrid')
+ idx = pd.date_range(start, end, closed='left', freq="H")
+ result = pd.DataFrame(index=idx, columns=['value'])
+ result.loc[ts, 'value'] = 12
+ expected = pd.DataFrame([np.nan] * len(idx) + [12],
+ index=idx.append(pd.DatetimeIndex([ts])),
+ columns=['value'],
+ dtype=object)
+ tm.assert_frame_equal(result, expected)
| - [x] closes #21846
- [x] closes #21737
- [x] closes #20724
- [x] closes #18785
- [x] closes #18308
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22201 | 2018-08-05T06:39:39Z | 2018-08-06T10:23:27Z | 2018-08-06T10:23:27Z | 2018-08-06T17:22:47Z |
BUG: Infer compression by default in read_fwf() (#22199) | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index a2abda019812a..d41293d7655a0 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1528,6 +1528,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form
- Bug in :meth:`DataFrame.to_dict` when the resulting dict contains non-Python scalars in the case of numeric data (:issue:`23753`)
- :func:`DataFrame.to_string()`, :func:`DataFrame.to_html()`, :func:`DataFrame.to_latex()` will correctly format output when a string is passed as the ``float_format`` argument (:issue:`21625`, :issue:`22270`)
- Bug in :func:`read_csv` that caused it to raise ``OverflowError`` when trying to use 'inf' as ``na_value`` with integer index column (:issue:`17128`)
+- Bug in :func:`read_fwf` in which the compression type of a file was not being properly inferred (:issue:`22199`)
- Bug in :func:`pandas.io.json.json_normalize` that caused it to raise ``TypeError`` when two consecutive elements of ``record_path`` are dicts (:issue:`22706`)
- Bug in :meth:`DataFrame.to_stata`, :class:`pandas.io.stata.StataWriter` and :class:`pandas.io.stata.StataWriter117` where a exception would leave a partially written and invalid dta file (:issue:`23573`)
- Bug in :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` that produced invalid files when using strLs with non-ASCII characters (:issue:`23573`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index aadca1fcb3bef..926d889bf8f91 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -401,7 +401,7 @@ def _read(filepath_or_buffer, kwds):
encoding = re.sub('_', '-', encoding).lower()
kwds['encoding'] = encoding
- compression = kwds.get('compression')
+ compression = kwds.get('compression', 'infer')
compression = _infer_compression(filepath_or_buffer, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression)
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index e8c5b37579d71..172bbe0bad4c7 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -555,20 +555,26 @@ def test_default_delimiter():
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("compression", ["gzip", "bz2"])
-def test_fwf_compression(compression):
+@pytest.mark.parametrize("infer", [True, False, None])
+def test_fwf_compression(compression_only, infer):
data = """1111111111
2222222222
3333333333""".strip()
+ compression = compression_only
+ extension = "gz" if compression == "gzip" else compression
+
kwargs = dict(widths=[5, 5], names=["one", "two"])
expected = read_fwf(StringIO(data), **kwargs)
if compat.PY3:
data = bytes(data, encoding="utf-8")
- with tm.ensure_clean() as path:
+ with tm.ensure_clean(filename="tmp." + extension) as path:
tm.write_to_compressed(compression, path, data)
- result = read_fwf(path, compression=compression, **kwargs)
+ if infer is not None:
+ kwargs["compression"] = "infer" if infer else compression
+
+ result = read_fwf(path, **kwargs)
tm.assert_frame_equal(result, expected)
| - [x] closes #22199
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22200 | 2018-08-05T04:06:17Z | 2018-12-27T19:26:23Z | 2018-12-27T19:26:23Z | 2018-12-27T19:26:38Z |
BUG: DataFrame.asof() : Timezone Awareness / Naivety comparison TypeError | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index f419e2d06239c..ea0677a0edf28 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -561,6 +561,7 @@ Timezones
- Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`)
- Bug in :class:`DatetimeIndex` comparisons failing to raise ``TypeError`` when comparing timezone-aware ``DatetimeIndex`` against ``np.datetime64`` (:issue:`22074`)
- Bug in ``DataFrame`` assignment with a timezone-aware scalar (:issue:`19843`)
+- Bug in :func:`Dataframe.asof` that raised a ``TypeError`` when attempting to compare tz-naive and tz-aware timestamps (:issue:`21194`)
- Bug when constructing a :class:`DatetimeIndex` with :class:`Timestamp`s constructed with the ``replace`` method across DST (:issue:`18785`)
- Bug when setting a new value with :meth:`DataFrame.loc` with a :class:`DatetimeIndex` with a DST transition (:issue:`18308`, :issue:`20724`)
- Bug in :meth:`DatetimeIndex.unique` that did not re-localize tz-aware dates correctly (:issue:`21737`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 90d87e6f1cc23..bfa669a0ca164 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2463,7 +2463,7 @@ def asof_locs(self, where, mask):
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
- result[(locs == 0) & (where < self.values[first])] = -1
+ result[(locs == 0) & (where.values < self.values[first])] = -1
return result
diff --git a/pandas/tests/frame/test_asof.py b/pandas/tests/frame/test_asof.py
index fea6a5370109e..091a5fb14e65e 100644
--- a/pandas/tests/frame/test_asof.py
+++ b/pandas/tests/frame/test_asof.py
@@ -1,6 +1,7 @@
# coding=utf-8
import numpy as np
+import pytest
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
@@ -106,3 +107,21 @@ def test_all_nans(self):
result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof(3)
expected = Series(np.nan, index=['A', 'B'], name=3)
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "stamp,expected",
+ [(Timestamp('2018-01-01 23:22:43.325+00:00'),
+ Series(2.0, name=Timestamp('2018-01-01 23:22:43.325+00:00'))),
+ (Timestamp('2018-01-01 22:33:20.682+01:00'),
+ Series(1.0, name=Timestamp('2018-01-01 22:33:20.682+01:00'))),
+ ]
+ )
+ def test_time_zone_aware_index(self, stamp, expected):
+ # GH21194
+ # Testing awareness of DataFrame index considering different
+ # UTC and timezone
+ df = DataFrame(data=[1, 2],
+ index=[Timestamp('2018-01-01 21:00:05.001+00:00'),
+ Timestamp('2018-01-01 22:35:10.550+00:00')])
+ result = df.asof(stamp)
+ tm.assert_series_equal(result, expected)
| - [X] closes #21194
- [X] tests added / passed
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22198 | 2018-08-04T21:32:43Z | 2018-08-06T10:36:30Z | 2018-08-06T10:36:30Z | 2018-08-06T11:11:03Z |
Finish moving period_helper to cython | diff --git a/ci/lint.sh b/ci/lint.sh
index ec99e1e559d6e..432be8f78aca6 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -93,7 +93,7 @@ if [ "$LINT" ]; then
# this particular codebase (e.g. src/headers, src/klib, src/msgpack). However,
# we can lint all header files since they aren't "generated" like C files are.
echo "Linting *.c and *.h"
- for path in '*.h' 'period_helper.c' 'datetime' 'parser' 'ujson'
+ for path in '*.h' 'parser' 'ujson'
do
echo "linting -> pandas/_libs/src/$path"
cpplint --quiet --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive pandas/_libs/src/$path
@@ -101,6 +101,11 @@ if [ "$LINT" ]; then
RET=1
fi
done
+ echo "linting -> pandas/_libs/tslibs/src/datetime"
+ cpplint --quiet --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive pandas/_libs/tslibs/src/datetime
+ if [ $? -ne "0" ]; then
+ RET=1
+ fi
echo "Linting *.c and *.h DONE"
echo "Check for invalid testing"
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 811f0d25c3838..a880259c19140 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-# cython: profile=False
from datetime import datetime, date
from cpython cimport (
@@ -34,9 +33,9 @@ cdef extern from "../src/datetime/np_datetime.h":
cimport util
from util cimport is_period_object, is_string_object, INT32_MIN
-from pandas._libs.tslibs.timedeltas import Timedelta
from timestamps import Timestamp
from timezones cimport is_utc, is_tzlocal, get_dst_info
+from timedeltas import Timedelta
from timedeltas cimport delta_to_nanoseconds
cimport ccalendar
@@ -55,7 +54,100 @@ from offsets import _Tick
cdef bint PY2 = str == bytes
-cdef extern from "period_helper.h":
+
+ctypedef struct asfreq_info:
+ int64_t intraday_conversion_factor
+ int is_end
+ int to_end
+ int from_end
+
+ctypedef int64_t (*freq_conv_func)(int64_t, asfreq_info*) nogil
+
+
+cdef extern from *:
+ """
+ /*** FREQUENCY CONSTANTS ***/
+
+ #define FR_ANN 1000 /* Annual */
+ #define FR_ANNDEC FR_ANN /* Annual - December year end*/
+ #define FR_ANNJAN 1001 /* Annual - January year end*/
+ #define FR_ANNFEB 1002 /* Annual - February year end*/
+ #define FR_ANNMAR 1003 /* Annual - March year end*/
+ #define FR_ANNAPR 1004 /* Annual - April year end*/
+ #define FR_ANNMAY 1005 /* Annual - May year end*/
+ #define FR_ANNJUN 1006 /* Annual - June year end*/
+ #define FR_ANNJUL 1007 /* Annual - July year end*/
+ #define FR_ANNAUG 1008 /* Annual - August year end*/
+ #define FR_ANNSEP 1009 /* Annual - September year end*/
+ #define FR_ANNOCT 1010 /* Annual - October year end*/
+ #define FR_ANNNOV 1011 /* Annual - November year end*/
+
+ /* The standard quarterly frequencies with various fiscal year ends
+ eg, Q42005 for Q@OCT runs Aug 1, 2005 to Oct 31, 2005 */
+ #define FR_QTR 2000 /* Quarterly - December year end (default Q) */
+ #define FR_QTRDEC FR_QTR /* Quarterly - December year end */
+ #define FR_QTRJAN 2001 /* Quarterly - January year end */
+ #define FR_QTRFEB 2002 /* Quarterly - February year end */
+ #define FR_QTRMAR 2003 /* Quarterly - March year end */
+ #define FR_QTRAPR 2004 /* Quarterly - April year end */
+ #define FR_QTRMAY 2005 /* Quarterly - May year end */
+ #define FR_QTRJUN 2006 /* Quarterly - June year end */
+ #define FR_QTRJUL 2007 /* Quarterly - July year end */
+ #define FR_QTRAUG 2008 /* Quarterly - August year end */
+ #define FR_QTRSEP 2009 /* Quarterly - September year end */
+ #define FR_QTROCT 2010 /* Quarterly - October year end */
+ #define FR_QTRNOV 2011 /* Quarterly - November year end */
+
+ #define FR_MTH 3000 /* Monthly */
+
+ #define FR_WK 4000 /* Weekly */
+ #define FR_WKSUN FR_WK /* Weekly - Sunday end of week */
+ #define FR_WKMON 4001 /* Weekly - Monday end of week */
+ #define FR_WKTUE 4002 /* Weekly - Tuesday end of week */
+ #define FR_WKWED 4003 /* Weekly - Wednesday end of week */
+ #define FR_WKTHU 4004 /* Weekly - Thursday end of week */
+ #define FR_WKFRI 4005 /* Weekly - Friday end of week */
+ #define FR_WKSAT 4006 /* Weekly - Saturday end of week */
+
+ #define FR_BUS 5000 /* Business days */
+ #define FR_DAY 6000 /* Daily */
+ #define FR_HR 7000 /* Hourly */
+ #define FR_MIN 8000 /* Minutely */
+ #define FR_SEC 9000 /* Secondly */
+ #define FR_MS 10000 /* Millisecondly */
+ #define FR_US 11000 /* Microsecondly */
+ #define FR_NS 12000 /* Nanosecondly */
+
+ #define FR_UND -10000 /* Undefined */
+
+ static int64_t daytime_conversion_factor_matrix[7][7] = {
+ {1, 24, 1440, 86400, 86400000, 86400000000, 86400000000000},
+ {0, 1, 60, 3600, 3600000, 3600000000, 3600000000000},
+ {0, 0, 1, 60, 60000, 60000000, 60000000000},
+ {0, 0, 0, 1, 1000, 1000000, 1000000000},
+ {0, 0, 0, 0, 1, 1000, 1000000},
+ {0, 0, 0, 0, 0, 1, 1000},
+ {0, 0, 0, 0, 0, 0, 1}};
+
+ int max_value(int a, int b) { return a > b ? a : b; }
+
+ static int min_value(int a, int b) { return a < b ? a : b; }
+
+ npy_int64 get_daytime_conversion_factor(int from_index, int to_index) {
+ int row = min_value(from_index, to_index);
+ int col = max_value(from_index, to_index);
+ // row or col < 6 means frequency strictly lower than Daily, which
+ // do not use daytime_conversion_factors
+ if (row < 6) {
+ return 0;
+ } else if (col < 6) {
+ return 0;
+ }
+ return daytime_conversion_factor_matrix[row - 6][col - 6];
+ }
+ """
+ int64_t get_daytime_conversion_factor(int from_index, int to_index) nogil
+ int max_value(int left, int right) nogil
int FR_ANN
int FR_QTR
int FR_MTH
@@ -70,21 +162,513 @@ cdef extern from "period_helper.h":
int FR_BUS
int FR_UND
- ctypedef struct asfreq_info:
- int64_t intraday_conversion_factor
- int is_end
- int to_end
- int from_end
+cdef int64_t nofunc(int64_t ordinal, asfreq_info *af_info):
+ return np.iinfo(np.int32).min
- ctypedef int64_t (*freq_conv_func)(int64_t, asfreq_info*) nogil
- freq_conv_func get_asfreq_func(int fromFreq, int toFreq) nogil
+cdef int64_t no_op(int64_t ordinal, asfreq_info *af_info):
+ return ordinal
- int64_t get_daytime_conversion_factor(int from_index, int to_index) nogil
- int max_value(int left, int right) nogil
+
+cdef freq_conv_func get_asfreq_func(int from_freq, int to_freq) nogil:
+ cdef:
+ int from_group = get_freq_group(from_freq)
+ int to_group = get_freq_group(to_freq)
+
+ if from_group == FR_UND:
+ from_group = FR_DAY
+
+ if from_group == FR_BUS:
+ if to_group == FR_ANN:
+ return <freq_conv_func>asfreq_BtoA
+ elif to_group == FR_QTR:
+ return <freq_conv_func>asfreq_BtoQ
+ elif to_group == FR_MTH:
+ return <freq_conv_func>asfreq_BtoM
+ elif to_group == FR_WK:
+ return <freq_conv_func>asfreq_BtoW
+ elif to_group == FR_BUS:
+ return <freq_conv_func>no_op
+ elif to_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ return <freq_conv_func>asfreq_BtoDT
+ else:
+ return <freq_conv_func>nofunc
+
+ elif to_group == FR_BUS:
+ if from_group == FR_ANN:
+ return <freq_conv_func>asfreq_AtoB
+ elif from_group == FR_QTR:
+ return <freq_conv_func>asfreq_QtoB
+ elif from_group == FR_MTH:
+ return <freq_conv_func>asfreq_MtoB
+ elif from_group == FR_WK:
+ return <freq_conv_func>asfreq_WtoB
+ elif from_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC,
+ FR_MS, FR_US, FR_NS]:
+ return <freq_conv_func>asfreq_DTtoB
+ else:
+ return <freq_conv_func>nofunc
+
+ elif from_group == FR_ANN:
+ if to_group == FR_ANN:
+ return <freq_conv_func>asfreq_AtoA
+ elif to_group == FR_QTR:
+ return <freq_conv_func>asfreq_AtoQ
+ elif to_group == FR_MTH:
+ return <freq_conv_func>asfreq_AtoM
+ elif to_group == FR_WK:
+ return <freq_conv_func>asfreq_AtoW
+ elif to_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ return <freq_conv_func>asfreq_AtoDT
+ else:
+ return <freq_conv_func>nofunc
+
+ elif from_group == FR_QTR:
+ if to_group == FR_ANN:
+ return <freq_conv_func>asfreq_QtoA
+ elif to_group == FR_QTR:
+ return <freq_conv_func>asfreq_QtoQ
+ elif to_group == FR_MTH:
+ return <freq_conv_func>asfreq_QtoM
+ elif to_group == FR_WK:
+ return <freq_conv_func>asfreq_QtoW
+ elif to_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ return <freq_conv_func>asfreq_QtoDT
+ else:
+ return <freq_conv_func>nofunc
+
+ elif from_group == FR_MTH:
+ if to_group == FR_ANN:
+ return <freq_conv_func>asfreq_MtoA
+ elif to_group == FR_QTR:
+ return <freq_conv_func>asfreq_MtoQ
+ elif to_group == FR_MTH:
+ return <freq_conv_func>no_op
+ elif to_group == FR_WK:
+ return <freq_conv_func>asfreq_MtoW
+ elif to_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ return <freq_conv_func>asfreq_MtoDT
+ else:
+ return <freq_conv_func>nofunc
+
+ elif from_group == FR_WK:
+ if to_group == FR_ANN:
+ return <freq_conv_func>asfreq_WtoA
+ elif to_group == FR_QTR:
+ return <freq_conv_func>asfreq_WtoQ
+ elif to_group == FR_MTH:
+ return <freq_conv_func>asfreq_WtoM
+ elif to_group == FR_WK:
+ return <freq_conv_func>asfreq_WtoW
+ elif to_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ return <freq_conv_func>asfreq_WtoDT
+ else:
+ return <freq_conv_func>nofunc
+
+ elif from_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ if to_group == FR_ANN:
+ return <freq_conv_func>asfreq_DTtoA
+ elif to_group == FR_QTR:
+ return <freq_conv_func>asfreq_DTtoQ
+ elif to_group == FR_MTH:
+ return <freq_conv_func>asfreq_DTtoM
+ elif to_group == FR_WK:
+ return <freq_conv_func>asfreq_DTtoW
+ elif to_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
+ if from_group > to_group:
+ return <freq_conv_func>downsample_daytime
+ else:
+ return <freq_conv_func>upsample_daytime
+
+ else:
+ return <freq_conv_func>nofunc
+
+ else:
+ return <freq_conv_func>nofunc
+
+
+# --------------------------------------------------------------------
+# Frequency Conversion Helpers
+
+cdef int64_t DtoB_weekday(int64_t unix_date) nogil:
+ return ((unix_date + 4) // 7) * 5 + ((unix_date + 4) % 7) - 4
+
+
+cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, int64_t unix_date):
+ cdef:
+ int day_of_week = dayofweek(dts.year, dts.month, dts.day)
+
+ if roll_back == 1:
+ if day_of_week > 4:
+ # change to friday before weekend
+ unix_date -= (day_of_week - 4)
+ else:
+ if day_of_week > 4:
+ # change to Monday after weekend
+ unix_date += (7 - day_of_week)
+
+ return DtoB_weekday(unix_date)
+
+
+cdef inline int64_t upsample_daytime(int64_t ordinal, asfreq_info *af_info):
+ if (af_info.is_end):
+ return (ordinal + 1) * af_info.intraday_conversion_factor - 1
+ else:
+ return ordinal * af_info.intraday_conversion_factor
+
+
+cdef inline int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info):
+ return ordinal // (af_info.intraday_conversion_factor)
+
+
+cdef inline int64_t transform_via_day(int64_t ordinal,
+ asfreq_info *af_info,
+ freq_conv_func first_func,
+ freq_conv_func second_func):
+ cdef:
+ int64_t result
+
+ result = first_func(ordinal, af_info)
+ result = second_func(result, af_info)
+ return result
+
+# --------------------------------------------------------------------
+# Conversion _to_ Daily Freq
+
+cdef void AtoD_ym(int64_t ordinal, int64_t *year,
+ int *month, asfreq_info *af_info):
+ year[0] = ordinal + 1970
+ month[0] = 1
+
+ if af_info.from_end != 12:
+ month[0] += af_info.from_end
+ if month[0] > 12:
+ # This case is never reached, but is kept for symmetry
+ # with QtoD_ym
+ month[0] -= 12
+ else:
+ year[0] -= 1
+
+
+cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int64_t unix_date, year
+ int month
+
+ ordinal += af_info.is_end
+ AtoD_ym(ordinal, &year, &month, af_info)
+
+ unix_date = unix_date_from_ymd(year, month, 1)
+ unix_date -= af_info.is_end
+ return upsample_daytime(unix_date, af_info)
+
+
+cdef void QtoD_ym(int64_t ordinal, int *year,
+ int *month, asfreq_info *af_info):
+ year[0] = ordinal // 4 + 1970
+ month[0] = (ordinal % 4) * 3 + 1
+
+ if af_info.from_end != 12:
+ month[0] += af_info.from_end
+ if month[0] > 12:
+ month[0] -= 12
+ else:
+ year[0] -= 1
+
+
+cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int64_t unix_date
+ int year, month
+
+ ordinal += af_info.is_end
+ QtoD_ym(ordinal, &year, &month, af_info)
+
+ unix_date = unix_date_from_ymd(year, month, 1)
+ unix_date -= af_info.is_end
+ return upsample_daytime(unix_date, af_info)
+
+
+cdef void MtoD_ym(int64_t ordinal, int *year, int *month):
+ year[0] = ordinal // 12 + 1970
+ month[0] = ordinal % 12 + 1
+
+
+cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int64_t unix_date
+ int year, month
+
+ ordinal += af_info.is_end
+ MtoD_ym(ordinal, &year, &month)
+
+ unix_date = unix_date_from_ymd(year, month, 1)
+ unix_date -= af_info.is_end
+ return upsample_daytime(unix_date, af_info)
+
+
+cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info):
+ ordinal = (ordinal * 7 + af_info.from_end - 4 +
+ (7 - 1) * (af_info.is_end - 1))
+ return upsample_daytime(ordinal, af_info)
+
+
+# --------------------------------------------------------------------
+# Conversion _to_ BusinessDay Freq
+
+cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int roll_back
+ npy_datetimestruct dts
+ int64_t unix_date = asfreq_AtoDT(ordinal, af_info)
+
+ pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts)
+ roll_back = af_info.is_end
+ return DtoB(&dts, roll_back, unix_date)
+
+
+cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int roll_back
+ npy_datetimestruct dts
+ int64_t unix_date = asfreq_QtoDT(ordinal, af_info)
+
+ pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts)
+ roll_back = af_info.is_end
+ return DtoB(&dts, roll_back, unix_date)
+
+
+cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int roll_back
+ npy_datetimestruct dts
+ int64_t unix_date = asfreq_MtoDT(ordinal, af_info)
+
+ pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts)
+ roll_back = af_info.is_end
+ return DtoB(&dts, roll_back, unix_date)
+
+
+cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int roll_back
+ npy_datetimestruct dts
+ int64_t unix_date = asfreq_WtoDT(ordinal, af_info)
+
+ pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts)
+ roll_back = af_info.is_end
+ return DtoB(&dts, roll_back, unix_date)
+
+
+cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int roll_back
+ npy_datetimestruct dts
+ int64_t unix_date = downsample_daytime(ordinal, af_info)
+
+ pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts)
+ # This usage defines roll_back the opposite way from the others
+ roll_back = 1 - af_info.is_end
+ return DtoB(&dts, roll_back, unix_date)
+
+
+# ----------------------------------------------------------------------
+# Conversion _from_ Daily Freq
+
+cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ npy_datetimestruct dts
+
+ ordinal = downsample_daytime(ordinal, af_info)
+ pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
+ if dts.month > af_info.to_end:
+ return <int64_t>(dts.year + 1 - 1970)
+ else:
+ return <int64_t>(dts.year - 1970)
+
+
+cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year):
+ cdef:
+ npy_datetimestruct dts
+ int quarter
+
+ pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
+ # TODO: Another version of this function used
+ # date_info_from_days_and_time(&dts, unix_date, 0)
+ # instead of pandas_datetime_to_datetimestruct; is one more performant?
+ if af_info.to_end != 12:
+ dts.month -= af_info.to_end
+ if dts.month <= 0:
+ dts.month += 12
+ else:
+ dts.year += 1
+
+ year[0] = dts.year
+ quarter = month_to_quarter(dts.month)
+ return quarter
+
+
+cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ int year, quarter
+
+ ordinal = downsample_daytime(ordinal, af_info)
+
+ quarter = DtoQ_yq(ordinal, af_info, &year)
+ return <int64_t>((year - 1970) * 4 + quarter - 1)
+
+
+cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info):
+ cdef:
+ npy_datetimestruct dts
+
+ ordinal = downsample_daytime(ordinal, af_info)
+ pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
+ return <int64_t>((dts.year - 1970) * 12 + dts.month - 1)
+
+
+cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info):
+ ordinal = downsample_daytime(ordinal, af_info)
+ return (ordinal + 3 - af_info.to_end) // 7 + 1
+
+
+# --------------------------------------------------------------------
+# Conversion _from_ BusinessDay Freq
+
+cdef int64_t asfreq_BtoDT(int64_t ordinal, asfreq_info *af_info):
+ ordinal = ((ordinal + 3) // 5) * 7 + (ordinal + 3) % 5 -3
+ return upsample_daytime(ordinal, af_info)
+
+
+cdef int64_t asfreq_BtoA(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_BtoDT,
+ <freq_conv_func>asfreq_DTtoA)
+
+
+cdef int64_t asfreq_BtoQ(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_BtoDT,
+ <freq_conv_func>asfreq_DTtoQ)
+
+
+cdef int64_t asfreq_BtoM(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_BtoDT,
+ <freq_conv_func>asfreq_DTtoM)
+
+
+cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_BtoDT,
+ <freq_conv_func>asfreq_DTtoW)
+
+
+# ----------------------------------------------------------------------
+# Conversion _from_ Annual Freq
+
+cdef int64_t asfreq_AtoA(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_AtoDT,
+ <freq_conv_func>asfreq_DTtoA)
+
+
+cdef int64_t asfreq_AtoQ(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_AtoDT,
+ <freq_conv_func>asfreq_DTtoQ);
+
+
+cdef int64_t asfreq_AtoM(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_AtoDT,
+ <freq_conv_func>asfreq_DTtoM)
+
+
+cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_AtoDT,
+ <freq_conv_func>asfreq_DTtoW)
+# ----------------------------------------------------------------------
+# Conversion _from_ Quarterly Freq
+
+cdef int64_t asfreq_QtoQ(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_QtoDT,
+ <freq_conv_func>asfreq_DTtoQ)
+
+
+cdef int64_t asfreq_QtoA(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_QtoDT,
+ <freq_conv_func>asfreq_DTtoA)
+
+
+cdef int64_t asfreq_QtoM(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_QtoDT,
+ <freq_conv_func>asfreq_DTtoM)
+
+
+cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_QtoDT,
+ <freq_conv_func>asfreq_DTtoW)
+
+
+# ----------------------------------------------------------------------
+# Conversion _from_ Monthly Freq
+
+cdef int64_t asfreq_MtoA(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_MtoDT,
+ <freq_conv_func>asfreq_DTtoA)
+
+
+cdef int64_t asfreq_MtoQ(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_MtoDT,
+ <freq_conv_func>asfreq_DTtoQ)
+
+
+cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_MtoDT,
+ <freq_conv_func>asfreq_DTtoW)
+
+
+# ----------------------------------------------------------------------
+# Conversion _from_ Weekly Freq
+
+cdef int64_t asfreq_WtoA(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_WtoDT,
+ <freq_conv_func>asfreq_DTtoA)
+
+
+cdef int64_t asfreq_WtoQ(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_WtoDT,
+ <freq_conv_func>asfreq_DTtoQ)
+
+
+cdef int64_t asfreq_WtoM(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_WtoDT,
+ <freq_conv_func>asfreq_DTtoM)
+
+
+cdef int64_t asfreq_WtoW(int64_t ordinal, asfreq_info *af_info):
+ return transform_via_day(ordinal, af_info,
+ <freq_conv_func>asfreq_WtoDT,
+ <freq_conv_func>asfreq_DTtoW)
+
+
+# ----------------------------------------------------------------------
+
@cython.cdivision
cdef char* c_strftime(npy_datetimestruct *dts, char *fmt):
"""
@@ -133,6 +717,23 @@ cdef inline int get_freq_group_index(int freq) nogil:
return freq // 1000
+# Find the unix_date (days elapsed since datetime(1970, 1, 1)
+# for the given year/month/day.
+# Assumes GREGORIAN_CALENDAR */
+cdef int64_t unix_date_from_ymd(int year, int month, int day) nogil:
+ # Calculate the absolute date
+ cdef:
+ npy_datetimestruct dts
+ int64_t unix_date
+
+ memset(&dts, 0, sizeof(npy_datetimestruct))
+ dts.year = year
+ dts.month = month
+ dts.day = day
+ unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, &dts)
+ return unix_date
+
+
# specifically _dont_ use cdvision or else ordinals near -1 are assigned to
# incorrect dates GH#19643
@cython.cdivision(False)
@@ -394,25 +995,6 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year):
return qtr_freq
-cdef int DtoQ_yq(int64_t unix_date, asfreq_info *af_info, int *year):
- cdef:
- npy_datetimestruct dts
- int quarter
-
- date_info_from_days_and_time(&dts, unix_date, 0)
-
- if af_info.to_end != 12:
- dts.month -= af_info.to_end
- if dts.month <= 0:
- dts.month += 12
- else:
- dts.year += 1
-
- year[0] = dts.year
- quarter = month_to_quarter(dts.month)
- return quarter
-
-
cdef inline int month_to_quarter(int month):
return (month - 1) // 3 + 1
@@ -1545,7 +2127,7 @@ cdef class _Period(object):
See Also
--------
Period.year : Return the calendar year of the period.
-
+
Examples
--------
If the natural and fiscal year are the same, `qyear` and `year` will
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c
index 1b33f38441253..659afd152106d 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c
@@ -18,7 +18,7 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif
+#endif // NPY_NO_DEPRECATED_API
#include <Python.h>
#include <datetime.h>
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.h b/pandas/_libs/tslibs/src/datetime/np_datetime.h
index 9fa85b18dd219..3974d5083f51b 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime.h
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime.h
@@ -14,12 +14,12 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
*/
-#ifndef PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
-#define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
+#ifndef PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_H_
+#define PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_H_
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif
+#endif // NPY_NO_DEPRECATED_API
#include <numpy/ndarraytypes.h>
#include <datetime.h>
@@ -79,4 +79,4 @@ void
add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes);
-#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
+#endif // PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_H_
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
index 19ade6fa5add9..05ccdd13598fb 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
@@ -24,7 +24,7 @@ This file implements string parsing and creation for NumPy datetime.
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif
+#endif // NPY_NO_DEPRECATED_API
#include <Python.h>
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h
index e9a7fd74b05e5..15d5dd357eaef 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h
@@ -19,12 +19,12 @@ This file implements string parsing and creation for NumPy datetime.
*/
-#ifndef PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
-#define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#ifndef PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#define PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif
+#endif // NPY_NO_DEPRECATED_API
/*
* Parses (almost) standard ISO 8601 date strings. The differences are:
@@ -80,4 +80,4 @@ int
make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen,
NPY_DATETIMEUNIT base);
-#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#endif // PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
diff --git a/pandas/_libs/tslibs/src/period_helper.c b/pandas/_libs/tslibs/src/period_helper.c
deleted file mode 100644
index 4bf3774e35a68..0000000000000
--- a/pandas/_libs/tslibs/src/period_helper.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
-Copyright (c) 2016, PyData Development Team
-All rights reserved.
-
-Distributed under the terms of the BSD Simplified License.
-
-The full license is in the LICENSE file, distributed with this software.
-
-Borrowed and derived code from scikits.timeseries that we will expose via
-Cython to pandas. This primarily concerns interval representation and
-frequency conversion routines.
-
-See end of file for stuff pandas uses (search for 'pandas').
-*/
-
-#ifndef NPY_NO_DEPRECATED_API
-#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif
-
-#include "period_helper.h"
-#include "datetime/np_datetime.h"
-
-/* ------------------------------------------------------------------
- * Code derived from scikits.timeseries
- * ------------------------------------------------------------------*/
-
-static int mod_compat(int x, int m) {
- int result = x % m;
- if (result < 0) return result + m;
- return result;
-}
-
-static int floordiv(int x, int divisor) {
- if (x < 0) {
- if (mod_compat(x, divisor)) {
- return x / divisor - 1;
- } else {
- return x / divisor;
- }
- } else {
- return x / divisor;
- }
-}
-
-
-static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; }
-
-
-/* Find the unix_date (days elapsed since datetime(1970, 1, 1)
- * for the given year/month/day.
- * Assumes GREGORIAN_CALENDAR */
-npy_int64 unix_date_from_ymd(int year, int month, int day) {
- /* Calculate the absolute date */
- npy_datetimestruct dts;
- npy_int64 unix_date;
-
- memset(&dts, 0, sizeof(npy_datetimestruct));
- dts.year = year;
- dts.month = month;
- dts.day = day;
- unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, &dts);
- return unix_date;
-}
-
-
-///////////////////////////////////////////////
-
-// frequency specific conversion routines
-// each function must take an integer fromDate and
-// a char relation ('S' or 'E' for 'START' or 'END')
-///////////////////////////////////////////////////////////////////////
-
-// helpers for frequency conversion routines //
-
-static npy_int64 daytime_conversion_factor_matrix[7][7] = {
- {1, 24, 1440, 86400, 86400000, 86400000000, 86400000000000},
- {0, 1, 60, 3600, 3600000, 3600000000, 3600000000000},
- {0, 0, 1, 60, 60000, 60000000, 60000000000},
- {0, 0, 0, 1, 1000, 1000000, 1000000000},
- {0, 0, 0, 0, 1, 1000, 1000000},
- {0, 0, 0, 0, 0, 1, 1000},
- {0, 0, 0, 0, 0, 0, 1}};
-
-int max_value(int a, int b) { return a > b ? a : b; }
-
-static int min_value(int a, int b) { return a < b ? a : b; }
-
-static int get_freq_group(int freq) { return (freq / 1000) * 1000; }
-
-
-npy_int64 get_daytime_conversion_factor(int from_index, int to_index) {
- int row = min_value(from_index, to_index);
- int col = max_value(from_index, to_index);
- // row or col < 6 means frequency strictly lower than Daily, which
- // do not use daytime_conversion_factors
- if (row < 6) {
- return 0;
- } else if (col < 6) {
- return 0;
- }
- return daytime_conversion_factor_matrix[row - 6][col - 6];
-}
-
-static npy_int64 upsample_daytime(npy_int64 ordinal, asfreq_info *af_info) {
- if (af_info->is_end) {
- return (ordinal + 1) * af_info->intraday_conversion_factor - 1;
- } else {
- return ordinal * af_info->intraday_conversion_factor;
- }
-}
-
-static npy_int64 downsample_daytime(npy_int64 ordinal, asfreq_info *af_info) {
- return ordinal / (af_info->intraday_conversion_factor);
-}
-
-static npy_int64 transform_via_day(npy_int64 ordinal,
- asfreq_info *af_info,
- freq_conv_func first_func,
- freq_conv_func second_func) {
- npy_int64 result;
-
- result = (*first_func)(ordinal, af_info);
- result = (*second_func)(result, af_info);
-
- return result;
-}
-
-static npy_int64 DtoB_weekday(npy_int64 unix_date) {
- return floordiv(unix_date + 4, 7) * 5 + mod_compat(unix_date + 4, 7) - 4;
-}
-
-static npy_int64 DtoB(npy_datetimestruct *dts,
- int roll_back, npy_int64 unix_date) {
- int day_of_week = dayofweek(dts->year, dts->month, dts->day);
-
- if (roll_back == 1) {
- if (day_of_week > 4) {
- // change to friday before weekend
- unix_date -= (day_of_week - 4);
- }
- } else {
- if (day_of_week > 4) {
- // change to Monday after weekend
- unix_date += (7 - day_of_week);
- }
- }
- return DtoB_weekday(unix_date);
-}
-
-
-//************ FROM DAILY ***************
-
-static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) {
- npy_datetimestruct dts;
- ordinal = downsample_daytime(ordinal, af_info);
- pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts);
- if (dts.month > af_info->to_end) {
- return (npy_int64)(dts.year + 1 - 1970);
- } else {
- return (npy_int64)(dts.year - 1970);
- }
-}
-
-static int DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year) {
- npy_datetimestruct dts;
- int quarter;
-
- pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts);
- if (af_info->to_end != 12) {
- dts.month -= af_info->to_end;
- if (dts.month <= 0) {
- dts.month += 12;
- } else {
- dts.year += 1;
- }
- }
-
- *year = dts.year;
- quarter = monthToQuarter(dts.month);
- return quarter;
-}
-
-static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, asfreq_info *af_info) {
- int year, quarter;
-
- ordinal = downsample_daytime(ordinal, af_info);
-
- quarter = DtoQ_yq(ordinal, af_info, &year);
- return (npy_int64)((year - 1970) * 4 + quarter - 1);
-}
-
-static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) {
- npy_datetimestruct dts;
-
- ordinal = downsample_daytime(ordinal, af_info);
-
- pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts);
- return (npy_int64)((dts.year - 1970) * 12 + dts.month - 1);
-}
-
-static npy_int64 asfreq_DTtoW(npy_int64 ordinal, asfreq_info *af_info) {
- ordinal = downsample_daytime(ordinal, af_info);
- return floordiv(ordinal + 3 - af_info->to_end, 7) + 1;
-}
-
-static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) {
- int roll_back;
- npy_datetimestruct dts;
- npy_int64 unix_date = downsample_daytime(ordinal, af_info);
- pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts);
-
- // This usage defines roll_back the opposite way from the others
- roll_back = 1 - af_info->is_end;
- return DtoB(&dts, roll_back, unix_date);
-}
-
-//************ FROM BUSINESS ***************
-
-static npy_int64 asfreq_BtoDT(npy_int64 ordinal, asfreq_info *af_info) {
- ordinal = floordiv(ordinal + 3, 5) * 7 + mod_compat(ordinal + 3, 5) - 3;
-
- return upsample_daytime(ordinal, af_info);
-}
-
-static npy_int64 asfreq_BtoA(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoA);
-}
-
-static npy_int64 asfreq_BtoQ(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoQ);
-}
-
-static npy_int64 asfreq_BtoM(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoM);
-}
-
-static npy_int64 asfreq_BtoW(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoW);
-}
-
-//************ FROM WEEKLY ***************
-
-static npy_int64 asfreq_WtoDT(npy_int64 ordinal, asfreq_info *af_info) {
- ordinal = ordinal * 7 + af_info->from_end - 4 +
- (7 - 1) * (af_info->is_end - 1);
- return upsample_daytime(ordinal, af_info);
-}
-
-static npy_int64 asfreq_WtoA(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoA);
-}
-
-static npy_int64 asfreq_WtoQ(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoQ);
-}
-
-static npy_int64 asfreq_WtoM(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoM);
-}
-
-static npy_int64 asfreq_WtoW(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoW);
-}
-
-static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) {
- int roll_back;
- npy_datetimestruct dts;
- npy_int64 unix_date = asfreq_WtoDT(ordinal, af_info);
-
- pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts);
- roll_back = af_info->is_end;
- return DtoB(&dts, roll_back, unix_date);
-}
-
-//************ FROM MONTHLY ***************
-static void MtoD_ym(npy_int64 ordinal, int *year, int *month) {
- *year = floordiv(ordinal, 12) + 1970;
- *month = mod_compat(ordinal, 12) + 1;
-}
-
-static npy_int64 asfreq_MtoDT(npy_int64 ordinal, asfreq_info *af_info) {
- npy_int64 unix_date;
- int year, month;
-
- ordinal += af_info->is_end;
- MtoD_ym(ordinal, &year, &month);
-
- unix_date = unix_date_from_ymd(year, month, 1);
- unix_date -= af_info->is_end;
- return upsample_daytime(unix_date, af_info);
-}
-
-static npy_int64 asfreq_MtoA(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_MtoDT, asfreq_DTtoA);
-}
-
-static npy_int64 asfreq_MtoQ(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_MtoDT, asfreq_DTtoQ);
-}
-
-static npy_int64 asfreq_MtoW(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_MtoDT, asfreq_DTtoW);
-}
-
-static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) {
- int roll_back;
- npy_datetimestruct dts;
- npy_int64 unix_date = asfreq_MtoDT(ordinal, af_info);
-
- pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts);
- roll_back = af_info->is_end;
- return DtoB(&dts, roll_back, unix_date);
-}
-
-//************ FROM QUARTERLY ***************
-
-static void QtoD_ym(npy_int64 ordinal, int *year, int *month,
- asfreq_info *af_info) {
- *year = floordiv(ordinal, 4) + 1970;
- *month = mod_compat(ordinal, 4) * 3 + 1;
-
- if (af_info->from_end != 12) {
- *month += af_info->from_end;
- if (*month > 12) {
- *month -= 12;
- } else {
- *year -= 1;
- }
- }
-}
-
-static npy_int64 asfreq_QtoDT(npy_int64 ordinal, asfreq_info *af_info) {
- npy_int64 unix_date;
- int year, month;
-
- ordinal += af_info->is_end;
- QtoD_ym(ordinal, &year, &month, af_info);
-
- unix_date = unix_date_from_ymd(year, month, 1);
- unix_date -= af_info->is_end;
- return upsample_daytime(unix_date, af_info);
-}
-
-static npy_int64 asfreq_QtoQ(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoQ);
-}
-
-static npy_int64 asfreq_QtoA(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoA);
-}
-
-static npy_int64 asfreq_QtoM(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoM);
-}
-
-static npy_int64 asfreq_QtoW(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoW);
-}
-
-static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) {
- int roll_back;
- npy_datetimestruct dts;
- npy_int64 unix_date = asfreq_QtoDT(ordinal, af_info);
-
- pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts);
- roll_back = af_info->is_end;
- return DtoB(&dts, roll_back, unix_date);
-}
-
-//************ FROM ANNUAL ***************
-
-static void AtoD_ym(npy_int64 ordinal, npy_int64 *year, int *month,
- asfreq_info *af_info) {
- *year = ordinal + 1970;
- *month = 1;
-
- if (af_info->from_end != 12) {
- *month += af_info->from_end;
- if (*month > 12) {
- // This case is never reached, but is kept for symmetry
- // with QtoD_ym
- *month -= 12;
- } else {
- *year -= 1;
- }
- }
-}
-
-static npy_int64 asfreq_AtoDT(npy_int64 ordinal, asfreq_info *af_info) {
- npy_int64 unix_date, year;
- int month;
-
- ordinal += af_info->is_end;
- AtoD_ym(ordinal, &year, &month, af_info);
-
- unix_date = unix_date_from_ymd(year, month, 1);
- unix_date -= af_info->is_end;
- return upsample_daytime(unix_date, af_info);
-}
-
-static npy_int64 asfreq_AtoA(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoA);
-}
-
-static npy_int64 asfreq_AtoQ(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoQ);
-}
-
-static npy_int64 asfreq_AtoM(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoM);
-}
-
-static npy_int64 asfreq_AtoW(npy_int64 ordinal, asfreq_info *af_info) {
- return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoW);
-}
-
-static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) {
- int roll_back;
- npy_datetimestruct dts;
- npy_int64 unix_date = asfreq_AtoDT(ordinal, af_info);
-
- pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts);
- roll_back = af_info->is_end;
- return DtoB(&dts, roll_back, unix_date);
-}
-
-static npy_int64 nofunc(npy_int64 ordinal, asfreq_info *af_info) {
- return INT_ERR_CODE;
-}
-static npy_int64 no_op(npy_int64 ordinal, asfreq_info *af_info) {
- return ordinal;
-}
-
-// end of frequency specific conversion routines
-
-freq_conv_func get_asfreq_func(int fromFreq, int toFreq) {
- int fromGroup = get_freq_group(fromFreq);
- int toGroup = get_freq_group(toFreq);
-
- if (fromGroup == FR_UND) {
- fromGroup = FR_DAY;
- }
-
- switch (fromGroup) {
- case FR_ANN:
- switch (toGroup) {
- case FR_ANN:
- return &asfreq_AtoA;
- case FR_QTR:
- return &asfreq_AtoQ;
- case FR_MTH:
- return &asfreq_AtoM;
- case FR_WK:
- return &asfreq_AtoW;
- case FR_BUS:
- return &asfreq_AtoB;
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- return &asfreq_AtoDT;
-
- default:
- return &nofunc;
- }
-
- case FR_QTR:
- switch (toGroup) {
- case FR_ANN:
- return &asfreq_QtoA;
- case FR_QTR:
- return &asfreq_QtoQ;
- case FR_MTH:
- return &asfreq_QtoM;
- case FR_WK:
- return &asfreq_QtoW;
- case FR_BUS:
- return &asfreq_QtoB;
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- return &asfreq_QtoDT;
- default:
- return &nofunc;
- }
-
- case FR_MTH:
- switch (toGroup) {
- case FR_ANN:
- return &asfreq_MtoA;
- case FR_QTR:
- return &asfreq_MtoQ;
- case FR_MTH:
- return &no_op;
- case FR_WK:
- return &asfreq_MtoW;
- case FR_BUS:
- return &asfreq_MtoB;
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- return &asfreq_MtoDT;
- default:
- return &nofunc;
- }
-
- case FR_WK:
- switch (toGroup) {
- case FR_ANN:
- return &asfreq_WtoA;
- case FR_QTR:
- return &asfreq_WtoQ;
- case FR_MTH:
- return &asfreq_WtoM;
- case FR_WK:
- return &asfreq_WtoW;
- case FR_BUS:
- return &asfreq_WtoB;
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- return &asfreq_WtoDT;
- default:
- return &nofunc;
- }
-
- case FR_BUS:
- switch (toGroup) {
- case FR_ANN:
- return &asfreq_BtoA;
- case FR_QTR:
- return &asfreq_BtoQ;
- case FR_MTH:
- return &asfreq_BtoM;
- case FR_WK:
- return &asfreq_BtoW;
- case FR_BUS:
- return &no_op;
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- return &asfreq_BtoDT;
- default:
- return &nofunc;
- }
-
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- switch (toGroup) {
- case FR_ANN:
- return &asfreq_DTtoA;
- case FR_QTR:
- return &asfreq_DTtoQ;
- case FR_MTH:
- return &asfreq_DTtoM;
- case FR_WK:
- return &asfreq_DTtoW;
- case FR_BUS:
- return &asfreq_DTtoB;
- case FR_DAY:
- case FR_HR:
- case FR_MIN:
- case FR_SEC:
- case FR_MS:
- case FR_US:
- case FR_NS:
- if (fromGroup > toGroup) {
- return &downsample_daytime;
- } else {
- return &upsample_daytime;
- }
- default:
- return &nofunc;
- }
-
- default:
- return &nofunc;
- }
-}
diff --git a/pandas/_libs/tslibs/src/period_helper.h b/pandas/_libs/tslibs/src/period_helper.h
deleted file mode 100644
index f0198935bd421..0000000000000
--- a/pandas/_libs/tslibs/src/period_helper.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-Copyright (c) 2016, PyData Development Team
-All rights reserved.
-
-Distributed under the terms of the BSD Simplified License.
-
-The full license is in the LICENSE file, distributed with this software.
-
-Borrowed and derived code from scikits.timeseries that we will expose via
-Cython to pandas. This primarily concerns interval representation and
-frequency conversion routines.
-*/
-
-#ifndef PANDAS__LIBS_SRC_PERIOD_HELPER_H_
-#define PANDAS__LIBS_SRC_PERIOD_HELPER_H_
-
-#ifndef NPY_NO_DEPRECATED_API
-#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif
-
-#include <Python.h>
-#include "limits.h"
-#include "numpy/ndarraytypes.h"
-
-/*** FREQUENCY CONSTANTS ***/
-
-#define FR_ANN 1000 /* Annual */
-#define FR_ANNDEC FR_ANN /* Annual - December year end*/
-#define FR_ANNJAN 1001 /* Annual - January year end*/
-#define FR_ANNFEB 1002 /* Annual - February year end*/
-#define FR_ANNMAR 1003 /* Annual - March year end*/
-#define FR_ANNAPR 1004 /* Annual - April year end*/
-#define FR_ANNMAY 1005 /* Annual - May year end*/
-#define FR_ANNJUN 1006 /* Annual - June year end*/
-#define FR_ANNJUL 1007 /* Annual - July year end*/
-#define FR_ANNAUG 1008 /* Annual - August year end*/
-#define FR_ANNSEP 1009 /* Annual - September year end*/
-#define FR_ANNOCT 1010 /* Annual - October year end*/
-#define FR_ANNNOV 1011 /* Annual - November year end*/
-
-/* The standard quarterly frequencies with various fiscal year ends
- eg, Q42005 for Q@OCT runs Aug 1, 2005 to Oct 31, 2005 */
-#define FR_QTR 2000 /* Quarterly - December year end (default quarterly) */
-#define FR_QTRDEC FR_QTR /* Quarterly - December year end */
-#define FR_QTRJAN 2001 /* Quarterly - January year end */
-#define FR_QTRFEB 2002 /* Quarterly - February year end */
-#define FR_QTRMAR 2003 /* Quarterly - March year end */
-#define FR_QTRAPR 2004 /* Quarterly - April year end */
-#define FR_QTRMAY 2005 /* Quarterly - May year end */
-#define FR_QTRJUN 2006 /* Quarterly - June year end */
-#define FR_QTRJUL 2007 /* Quarterly - July year end */
-#define FR_QTRAUG 2008 /* Quarterly - August year end */
-#define FR_QTRSEP 2009 /* Quarterly - September year end */
-#define FR_QTROCT 2010 /* Quarterly - October year end */
-#define FR_QTRNOV 2011 /* Quarterly - November year end */
-
-#define FR_MTH 3000 /* Monthly */
-
-#define FR_WK 4000 /* Weekly */
-#define FR_WKSUN FR_WK /* Weekly - Sunday end of week */
-#define FR_WKMON 4001 /* Weekly - Monday end of week */
-#define FR_WKTUE 4002 /* Weekly - Tuesday end of week */
-#define FR_WKWED 4003 /* Weekly - Wednesday end of week */
-#define FR_WKTHU 4004 /* Weekly - Thursday end of week */
-#define FR_WKFRI 4005 /* Weekly - Friday end of week */
-#define FR_WKSAT 4006 /* Weekly - Saturday end of week */
-
-#define FR_BUS 5000 /* Business days */
-#define FR_DAY 6000 /* Daily */
-#define FR_HR 7000 /* Hourly */
-#define FR_MIN 8000 /* Minutely */
-#define FR_SEC 9000 /* Secondly */
-#define FR_MS 10000 /* Millisecondly */
-#define FR_US 11000 /* Microsecondly */
-#define FR_NS 12000 /* Nanosecondly */
-
-#define FR_UND -10000 /* Undefined */
-
-#define INT_ERR_CODE NPY_MIN_INT32
-
-typedef struct asfreq_info {
- int is_end;
- // char relation == 'S' (for START) --> is_end = 0
- // char relation == 'E' (for END) --> is_end = 1
-
- int from_end;
- int to_end;
- // weekly:
- // from_end --> day the week ends on in the "from" frequency
- // to_end --> day the week ends on in the "to" frequency
- //
- // annual:
- // from_end --> month the year ends on in the "from" frequency
- // to_end --> month the year ends on in the "to" frequency
- //
- // quarterly:
- // from_end --> month the year ends on in the "from" frequency
- // to_end --> month the year ends on in the "to" frequency
-
- npy_int64 intraday_conversion_factor;
-} asfreq_info;
-
-typedef npy_int64 (*freq_conv_func)(npy_int64, asfreq_info *af_info);
-
-/*
- * new pandas API helper functions here
- */
-
-freq_conv_func get_asfreq_func(int fromFreq, int toFreq);
-
-npy_int64 get_daytime_conversion_factor(int from_index, int to_index);
-int max_value(int a, int b);
-
-#endif // PANDAS__LIBS_SRC_PERIOD_HELPER_H_
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 9e7f1d94934ba..8b8475cc3727b 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -162,7 +162,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
if ts.astype('int64') == NPY_NAT:
return np.timedelta64(NPY_NAT)
elif is_timedelta64_object(ts):
- ts = ts.astype("m8[{0}]".format(unit.lower()))
+ ts = ts.astype("m8[{unit}]".format(unit=unit.lower()))
elif is_integer_object(ts):
if ts == NPY_NAT:
return np.timedelta64(NPY_NAT)
@@ -265,7 +265,7 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
m = 1L
p = 0
else:
- raise ValueError("cannot cast unit {0}".format(unit))
+ raise ValueError("cannot cast unit {unit}".format(unit=unit))
# just give me the unit back
if ts is None:
@@ -273,11 +273,11 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
# cast the unit, multiply base/frace separately
# to avoid precision issues from float -> int
- base = <int64_t> ts
+ base = <int64_t>ts
frac = ts - base
if p:
frac = round(frac, p)
- return <int64_t> (base * m) + <int64_t> (frac * m)
+ return <int64_t>(base * m) + <int64_t>(frac * m)
cdef inline _decode_if_necessary(object ts):
@@ -296,10 +296,10 @@ cdef inline parse_timedelta_string(object ts):
cdef:
unicode c
- bint neg=0, have_dot=0, have_value=0, have_hhmmss=0
- object current_unit=None
- int64_t result=0, m=0, r
- list number=[], frac=[], unit=[]
+ bint neg = 0, have_dot = 0, have_value = 0, have_hhmmss = 0
+ object current_unit = None
+ int64_t result = 0, m = 0, r
+ list number = [], frac = [], unit = []
# neg : tracks if we have a leading negative for the value
# have_dot : tracks if we are processing a dot (either post hhmmss or
@@ -374,7 +374,7 @@ cdef inline parse_timedelta_string(object ts):
have_hhmmss = 1
else:
raise ValueError("expecting hh:mm:ss format, "
- "received: {0}".format(ts))
+ "received: {ts}".format(ts=ts))
unit, number = [], []
@@ -483,7 +483,7 @@ cdef inline timedelta_from_spec(object number, object frac, object unit):
unit = ''.join(unit)
unit = timedelta_abbrevs[unit.lower()]
except KeyError:
- raise ValueError("invalid abbreviation: {0}".format(unit))
+ raise ValueError("invalid abbreviation: {unit}".format(unit=unit))
n = ''.join(number) + '.' + ''.join(frac)
return cast_from_unit(float(n), unit)
@@ -592,10 +592,10 @@ cdef inline int64_t parse_iso_format_string(object ts) except? -1:
cdef:
unicode c
int64_t result = 0, r
- int p=0
+ int p = 0
object dec_unit = 'ms', err_msg
- bint have_dot=0, have_value=0, neg=0
- list number=[], unit=[]
+ bint have_dot = 0, have_value = 0, neg = 0
+ list number = [], unit = []
ts = _decode_if_necessary(ts)
@@ -682,8 +682,8 @@ cdef _to_py_int_float(v):
return int(v)
elif is_float_object(v):
return float(v)
- raise TypeError("Invalid type {0}. Must be int or "
- "float.".format(type(v)))
+ raise TypeError("Invalid type {typ}. Must be int or "
+ "float.".format(typ=type(v)))
# Similar to Timestamp/datetime, this is a construction requirement for
@@ -729,9 +729,10 @@ cdef class _Timedelta(timedelta):
return True
# only allow ==, != ops
- raise TypeError('Cannot compare type {!r} with type ' \
- '{!r}'.format(type(self).__name__,
- type(other).__name__))
+ raise TypeError('Cannot compare type {cls} with '
+ 'type {other}'
+ .format(cls=type(self).__name__,
+ other=type(other).__name__))
if util.is_array(other):
return PyObject_RichCompare(np.array([self]), other, op)
return PyObject_RichCompare(other, self, reverse_ops[op])
@@ -740,9 +741,9 @@ cdef class _Timedelta(timedelta):
return False
elif op == Py_NE:
return True
- raise TypeError('Cannot compare type {!r} with type ' \
- '{!r}'.format(type(self).__name__,
- type(other).__name__))
+ raise TypeError('Cannot compare type {cls} with type {other}'
+ .format(cls=type(self).__name__,
+ other=type(other).__name__))
return cmp_scalar(self.value, ots.value, op)
@@ -980,8 +981,8 @@ cdef class _Timedelta(timedelta):
sign = " "
if format == 'all':
- fmt = "{days} days{sign}{hours:02}:{minutes:02}:{seconds:02}." \
- "{milliseconds:03}{microseconds:03}{nanoseconds:03}"
+ fmt = ("{days} days{sign}{hours:02}:{minutes:02}:{seconds:02}."
+ "{milliseconds:03}{microseconds:03}{nanoseconds:03}")
else:
# if we have a partial day
subs = (self._h or self._m or self._s or
@@ -1006,7 +1007,7 @@ cdef class _Timedelta(timedelta):
return fmt.format(**comp_dict)
def __repr__(self):
- return "Timedelta('{0}')".format(self._repr_base(format='long'))
+ return "Timedelta('{val}')".format(val=self._repr_base(format='long'))
def __str__(self):
return self._repr_base(format='long')
@@ -1060,8 +1061,8 @@ cdef class _Timedelta(timedelta):
components.nanoseconds)
# Trim unnecessary 0s, 1.000000000 -> 1
seconds = seconds.rstrip('0').rstrip('.')
- tpl = 'P{td.days}DT{td.hours}H{td.minutes}M{seconds}S'.format(
- td=components, seconds=seconds)
+ tpl = ('P{td.days}DT{td.hours}H{td.minutes}M{seconds}S'
+ .format(td=components, seconds=seconds))
return tpl
diff --git a/setup.py b/setup.py
index 3289f1e99b87f..4fdfc0ab7de0d 100755
--- a/setup.py
+++ b/setup.py
@@ -234,7 +234,6 @@ def initialize_options(self):
ujson_lib = pjoin(base, 'ujson', 'lib')
self._clean_exclude = [pjoin(dt, 'np_datetime.c'),
pjoin(dt, 'np_datetime_strings.c'),
- pjoin(tsbase, 'period_helper.c'),
pjoin(parser, 'tokenizer.c'),
pjoin(parser, 'io.c'),
pjoin(ujson_python, 'ujson.c'),
@@ -616,10 +615,8 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
'_libs.tslibs.period': {
'pyxfile': '_libs/tslibs/period',
'include': ts_include,
- 'depends': tseries_depends + [
- 'pandas/_libs/tslibs/src/period_helper.h'],
- 'sources': np_datetime_sources + [
- 'pandas/_libs/tslibs/src/period_helper.c']},
+ 'depends': tseries_depends,
+ 'sources': np_datetime_sources},
'_libs.tslibs.resolution': {
'pyxfile': '_libs/tslibs/resolution',
'include': ts_include,
| - [x] closes #19465
There is probably some cleanup that can be done now that period logic is self-contained.
No evident perf impact.
```
asv continuous -E virtualenv -f 1.1 master HEAD -b period
[...]
before after ratio
[776fed3a] [7cc0d9e8]
+ 1.14±0μs 1.28±0.01μs 1.13 period.PeriodProperties.time_property('M', 'qyear')
- 579±30ms 509±4ms 0.88 groupby.Datelike.time_sum('period_range')
```
(will post results from a couple more runs as they become available)
<b> update<b/> two more runs show zero change, not even noise.
<b>update</b> Oh, also did some unrelated modernizing of string formatting in timedeltas. | https://api.github.com/repos/pandas-dev/pandas/pulls/22196 | 2018-08-04T20:46:44Z | 2018-08-07T12:57:38Z | 2018-08-07T12:57:37Z | 2018-08-08T15:51:31Z |
Run tests in conda build [ci skip] | diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml
index 2bc42c1bd2dec..f92090fecccf3 100644
--- a/conda.recipe/meta.yaml
+++ b/conda.recipe/meta.yaml
@@ -29,8 +29,11 @@ requirements:
- pytz
test:
- imports:
- - pandas
+ requires:
+ - pytest
+ commands:
+ - python -c "import pandas; pandas.test()"
+
about:
home: http://pandas.pydata.org
| [ci skip]
This is to aid in the release process. Easiest to let conda-build run the tests by default, and disable them where we don't want to run them.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22190 | 2018-08-03T15:39:53Z | 2018-08-03T16:50:14Z | 2018-08-03T16:50:14Z | 2018-08-03T16:50:16Z |
DOC: Update the Series.str.len docstring | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 6349af4d2e0ac..6ef736d8a0b09 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -2801,11 +2801,48 @@ def rindex(self, sub, start=0, end=None):
return self._wrap_result(result)
_shared_docs['len'] = ("""
- Compute length of each string in the Series/Index.
+ Computes the length of each element in the Series/Index. The element may be
+ a sequence (such as a string, tuple or list) or a collection
+ (such as a dictionary).
Returns
-------
- lengths : Series/Index of integer values
+ Series or Index of int
+ A Series or Index of integer values indicating the length of each
+ element in the Series or Index.
+
+ See Also
+ --------
+ str.len : Python built-in function returning the length of an object.
+ Series.size : Returns the length of the Series.
+
+ Examples
+ --------
+ Returns the length (number of characters) in a string. Returns the
+ number of entries for dictionaries, lists or tuples.
+
+ >>> s = pd.Series(['dog',
+ ... '',
+ ... 5,
+ ... {'foo' : 'bar'},
+ ... [2, 3, 5, 7],
+ ... ('one', 'two', 'three')])
+ >>> s
+ 0 dog
+ 1
+ 2 5
+ 3 {'foo': 'bar'}
+ 4 [2, 3, 5, 7]
+ 5 (one, two, three)
+ dtype: object
+ >>> s.str.len()
+ 0 3.0
+ 1 0.0
+ 2 NaN
+ 3 1.0
+ 4 4.0
+ 5 3.0
+ dtype: float64
""")
len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
| [X] PR title is "DOC: update the docstring"
[X] The PEP8 style check passes: git diff upstream/master -u -- "*.py" | flake8 --diff
[X] The html version looks good: python doc/make.py --single
| https://api.github.com/repos/pandas-dev/pandas/pulls/22187 | 2018-08-03T09:50:15Z | 2018-08-09T10:59:10Z | 2018-08-09T10:59:10Z | 2018-08-09T10:59:15Z |
CI: Fix Travis failures due to lint.sh on pandas/core/strings.py | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index b5b44a361a98d..39ecf7f49bc2e 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -293,7 +293,8 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
See Also
--------
match : analogous, but stricter, relying on re.match instead of re.search
- Series.str.startswith : Test if the start of each string element matches a pattern.
+ Series.str.startswith : Test if the start of each string element matches a
+ pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
| Travis builds are failing since `lint.sh` is catching a line that's too long in `pandas/core/strings.py`, e.g. https://travis-ci.org/pandas-dev/pandas/jobs/411505206 | https://api.github.com/repos/pandas-dev/pandas/pulls/22184 | 2018-08-02T23:29:58Z | 2018-08-03T01:58:33Z | 2018-08-03T01:58:33Z | 2018-08-03T16:39:40Z |
implement masked_arith_op to de-duplicate ops code | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 9bf952633ccff..dc139a8e14f66 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -27,6 +27,7 @@
from pandas.core.dtypes.common import (
needs_i8_conversion,
is_datetimelike_v_numeric,
+ is_period_dtype,
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
@@ -41,7 +42,7 @@
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame, ABCPanel,
- ABCIndex,
+ ABCIndex, ABCIndexClass,
ABCSparseSeries, ABCSparseArray)
@@ -788,6 +789,57 @@ def mask_cmp_op(x, y, op, allowed_types):
return result
+def masked_arith_op(x, y, op):
+ """
+ If the given arithmetic operation fails, attempt it again on
+ only the non-null elements of the input array(s).
+
+ Parameters
+ ----------
+ x : np.ndarray
+ y : np.ndarray, Series, Index
+ op : binary operator
+ """
+ # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
+ # the logic valid for both Series and DataFrame ops.
+ xrav = x.ravel()
+ assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
+ if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
+ dtype = find_common_type([x.dtype, y.dtype])
+ result = np.empty(x.size, dtype=dtype)
+
+ # PeriodIndex.ravel() returns int64 dtype, so we have
+ # to work around that case. See GH#19956
+ yrav = y if is_period_dtype(y) else y.ravel()
+ mask = notna(xrav) & notna(yrav)
+
+ if yrav.shape != mask.shape:
+ # FIXME: GH#5284, GH#5035, GH#19448
+ # Without specifically raising here we get mismatched
+ # errors in Py3 (TypeError) vs Py2 (ValueError)
+ # Note: Only = an issue in DataFrame case
+ raise ValueError('Cannot broadcast operands together.')
+
+ if mask.any():
+ with np.errstate(all='ignore'):
+ result[mask] = op(xrav[mask],
+ com.values_from_object(yrav[mask]))
+
+ else:
+ assert is_scalar(y), type(y)
+ assert isinstance(x, np.ndarray), type(x)
+ # mask is only meaningful for x
+ result = np.empty(x.size, dtype=x.dtype)
+ mask = notna(xrav)
+ if mask.any():
+ with np.errstate(all='ignore'):
+ result[mask] = op(xrav[mask], y)
+
+ result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
+ result = result.reshape(x.shape) # 2D compat
+ return result
+
+
def invalid_comparison(left, right, op):
"""
If a comparison has mismatched types and is not necessarily meaningful,
@@ -880,8 +932,7 @@ def _get_method_wrappers(cls):
return arith_flex, comp_flex, arith_special, comp_special, bool_special
-def _create_methods(cls, arith_method, comp_method, bool_method,
- special=False):
+def _create_methods(cls, arith_method, comp_method, bool_method, special):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
@@ -1136,19 +1187,7 @@ def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
- if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):
- dtype = find_common_type([x.dtype, y.dtype])
- result = np.empty(x.size, dtype=dtype)
- mask = notna(x) & notna(y)
- result[mask] = op(x[mask], com.values_from_object(y[mask]))
- else:
- assert isinstance(x, np.ndarray)
- assert is_scalar(y)
- result = np.empty(len(x), dtype=x.dtype)
- mask = notna(x)
- result[mask] = op(x[mask], y)
-
- result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
+ result = masked_arith_op(x, y, op)
result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
return result
@@ -1675,40 +1714,7 @@ def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
- xrav = x.ravel()
- if isinstance(y, (np.ndarray, ABCSeries)):
- dtype = find_common_type([x.dtype, y.dtype])
- result = np.empty(x.size, dtype=dtype)
- yrav = y.ravel()
- mask = notna(xrav) & notna(yrav)
- xrav = xrav[mask]
-
- if yrav.shape != mask.shape:
- # FIXME: GH#5284, GH#5035, GH#19448
- # Without specifically raising here we get mismatched
- # errors in Py3 (TypeError) vs Py2 (ValueError)
- raise ValueError('Cannot broadcast operands together.')
-
- yrav = yrav[mask]
- if xrav.size:
- with np.errstate(all='ignore'):
- result[mask] = op(xrav, yrav)
-
- elif isinstance(x, np.ndarray):
- # mask is only meaningful for x
- result = np.empty(x.size, dtype=x.dtype)
- mask = notna(xrav)
- xrav = xrav[mask]
- if xrav.size:
- with np.errstate(all='ignore'):
- result[mask] = op(xrav, y)
- else:
- raise TypeError("cannot perform operation {op} between "
- "objects of type {x} and {y}"
- .format(op=op_name, x=type(x), y=type(y)))
-
- result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
- result = result.reshape(x.shape)
+ result = masked_arith_op(x, y, op)
result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
| ATM _arith_method_SERIES and _arith_method_FRAME have really similar fallback code. This de-duplicates those.
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/22182 | 2018-08-02T22:23:03Z | 2018-08-10T10:00:26Z | 2018-08-10T10:00:26Z | 2018-08-10T17:20:21Z |
Standardize special case in tz_conversion functions | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index a459b185fa48c..74a9823a85016 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -612,7 +612,7 @@ cpdef inline datetime localize_pydatetime(datetime dt, object tz):
# ----------------------------------------------------------------------
# Timezone Conversion
-cdef inline int64_t[:] _tz_convert_dst(ndarray[int64_t] values, tzinfo tz,
+cdef inline int64_t[:] _tz_convert_dst(int64_t[:] values, tzinfo tz,
bint to_utc=True):
"""
tz_convert for non-UTC non-tzlocal cases where we have to check
@@ -631,11 +631,10 @@ cdef inline int64_t[:] _tz_convert_dst(ndarray[int64_t] values, tzinfo tz,
"""
cdef:
Py_ssize_t n = len(values)
- Py_ssize_t i, j, pos
+ Py_ssize_t i, pos
int64_t[:] result = np.empty(n, dtype=np.int64)
- ndarray[int64_t] tt, trans
+ ndarray[int64_t] trans
int64_t[:] deltas
- Py_ssize_t[:] posn
int64_t v
trans, deltas, typ = get_dst_info(tz)
@@ -643,21 +642,15 @@ cdef inline int64_t[:] _tz_convert_dst(ndarray[int64_t] values, tzinfo tz,
# We add `offset` below instead of subtracting it
deltas = -1 * np.array(deltas, dtype='i8')
- tt = values[values != NPY_NAT]
- if not len(tt):
- # if all NaT, return all NaT
- return values
-
- posn = trans.searchsorted(tt, side='right')
-
- j = 0
for i in range(n):
v = values[i]
if v == NPY_NAT:
result[i] = v
else:
- pos = posn[j] - 1
- j += 1
+ # TODO: Is it more efficient to call searchsorted pointwise or
+ # on `values` outside the loop? We are not consistent about this.
+ # relative effiency of pointwise increases with number of iNaTs
+ pos = trans.searchsorted(v, side='right') - 1
if pos < 0:
raise ValueError('First time before start of DST info')
result[i] = v - deltas[pos]
@@ -734,7 +727,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2):
Py_ssize_t pos
int64_t v, offset, utc_date
npy_datetimestruct dts
- ndarray[int64_t] arr # TODO: Is there a lighter-weight way to do this?
+ int64_t arr[1]
# See GH#17734 We should always be converting either from UTC or to UTC
assert (is_utc(tz1) or tz1 == 'UTC') or (is_utc(tz2) or tz2 == 'UTC')
@@ -746,7 +739,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2):
if is_tzlocal(tz1):
utc_date = _tz_convert_tzlocal_utc(val, tz1, to_utc=True)
elif get_timezone(tz1) != 'UTC':
- arr = np.array([val])
+ arr[0] = val
utc_date = _tz_convert_dst(arr, tz1, to_utc=True)[0]
else:
utc_date = val
@@ -757,7 +750,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2):
return _tz_convert_tzlocal_utc(utc_date, tz2, to_utc=False)
else:
# Convert UTC to other timezone
- arr = np.array([utc_date])
+ arr[0] = utc_date
# Note: at least with cython 0.28.3, doing a lookup `[0]` in the next
# line is sensitive to the declared return type of _tz_convert_dst;
# if it is declared as returning ndarray[int64_t], a compile-time error
@@ -765,9 +758,46 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2):
return _tz_convert_dst(arr, tz2, to_utc=False)[0]
+cdef inline int64_t[:] _tz_convert_one_way(int64_t[:] vals, object tz,
+ bint to_utc):
+ """
+ Convert the given values (in i8) either to UTC or from UTC.
+
+ Parameters
+ ----------
+ vals : int64 ndarray
+ tz1 : string / timezone object
+ to_utc : bint
+
+ Returns
+ -------
+ converted : ndarray[int64_t]
+ """
+ cdef:
+ int64_t[:] converted, result
+ Py_ssize_t i, n = len(vals)
+ int64_t val
+
+ if get_timezone(tz) != 'UTC':
+ converted = np.empty(n, dtype=np.int64)
+ if is_tzlocal(tz):
+ for i in range(n):
+ val = vals[i]
+ if val == NPY_NAT:
+ converted[i] = NPY_NAT
+ else:
+ converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc)
+ else:
+ converted = _tz_convert_dst(vals, tz, to_utc)
+ else:
+ converted = vals
+
+ return converted
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
-def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
+def tz_convert(int64_t[:] vals, object tz1, object tz2):
"""
Convert the values (in i8) from timezone1 to timezone2
@@ -781,45 +811,16 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
-------
int64 ndarray of converted
"""
-
cdef:
- ndarray[int64_t] utc_dates, result
- Py_ssize_t i, j, pos, n = len(vals)
- int64_t v
+ int64_t[:] utc_dates, converted
if len(vals) == 0:
return np.array([], dtype=np.int64)
# Convert to UTC
- if get_timezone(tz1) != 'UTC':
- utc_dates = np.empty(n, dtype=np.int64)
- if is_tzlocal(tz1):
- for i in range(n):
- v = vals[i]
- if v == NPY_NAT:
- utc_dates[i] = NPY_NAT
- else:
- utc_dates[i] = _tz_convert_tzlocal_utc(v, tz1, to_utc=True)
- else:
- utc_dates = np.array(_tz_convert_dst(vals, tz1, to_utc=True))
- else:
- utc_dates = vals
-
- if get_timezone(tz2) == 'UTC':
- return utc_dates
-
- elif is_tzlocal(tz2):
- result = np.zeros(n, dtype=np.int64)
- for i in range(n):
- v = utc_dates[i]
- if v == NPY_NAT:
- result[i] = NPY_NAT
- else:
- result[i] = _tz_convert_tzlocal_utc(v, tz2, to_utc=False)
- return result
- else:
- # Convert UTC to other timezone
- return np.array(_tz_convert_dst(utc_dates, tz2, to_utc=False))
+ utc_dates = _tz_convert_one_way(vals, tz1, to_utc=True)
+ converted = _tz_convert_one_way(utc_dates, tz2, to_utc=False)
+ return np.array(converted, dtype=np.int64)
# TODO: cdef scalar version to call from convert_str_to_tsobject
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 2efe506d2c154..7a279a41709ec 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -5,7 +5,7 @@ cimport cython
from cython cimport Py_ssize_t
import time
-from cpython.datetime cimport (PyDateTime_IMPORT, PyDateTime_CheckExact,
+from cpython.datetime cimport (PyDateTime_IMPORT,
datetime, timedelta,
time as dt_time)
PyDateTime_IMPORT
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index afda2046fd12d..61fb48c6913d3 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -6,7 +6,6 @@ Parsing functions for datetime and datetime-like strings.
import sys
import re
-cimport cython
from cython cimport Py_ssize_t
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx
index 18cc21ccd59e0..83be739a6ae0a 100644
--- a/pandas/_libs/tslibs/resolution.pyx
+++ b/pandas/_libs/tslibs/resolution.pyx
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# cython: profile=False
-cimport cython
from cython cimport Py_ssize_t
import numpy as np
@@ -11,8 +10,7 @@ from util cimport is_string_object, get_nat
from np_datetime cimport npy_datetimestruct, dt64_to_dtstruct
from frequencies cimport get_freq_code
-from timezones cimport (is_utc, is_tzlocal,
- maybe_get_tz, get_dst_info)
+from timezones cimport is_utc, is_tzlocal, maybe_get_tz, get_dst_info
from conversion cimport tz_convert_utc_to_tzlocal
from ccalendar cimport get_days_in_month
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 59d673881bb40..8e7c55051a3c0 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -23,7 +23,6 @@ except:
import pytz
from cython cimport Py_ssize_t
-from cpython cimport PyFloat_Check
import numpy as np
from numpy cimport int64_t
@@ -622,6 +621,7 @@ cdef _calc_julian_from_U_or_W(int year, int week_of_year,
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
+
cdef parse_timezone_directive(object z):
"""
Parse the '%z' directive and return a pytz.FixedOffset
| ATM `_tz_convert_dst` handles iteration and masking differently (and more complicated-ly) than other related functions. AFAICT the reasoning is that this implementation is performant when the passed `vals` have a high proportion of `iNaT`s.
By changing this special case to conform to the standard pattern, we allow other functions to call this without having to do any special casting, generally simplify things, and open the door to de-duplicating these functions (which this PR also starts doing by implementing `_tz_convert`)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22181 | 2018-08-02T21:34:29Z | 2018-08-08T10:49:19Z | 2018-08-08T10:49:19Z | 2018-08-08T15:50:40Z |
Documentation: typo fixes in MultiIndex / Advanced Indexing | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index e530ece2e12c5..2be1a53aa6c93 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -21,7 +21,7 @@ See the :ref:`Indexing and Selecting Data <indexing>` for general indexing docum
.. warning::
- Whether a copy or a reference is returned for a setting operation, may
+ Whether a copy or a reference is returned for a setting operation may
depend on the context. This is sometimes called ``chained assignment`` and
should be avoided. See :ref:`Returning a View versus Copy
<indexing.view_versus_copy>`.
@@ -172,7 +172,7 @@ Defined Levels
~~~~~~~~~~~~~~
The repr of a ``MultiIndex`` shows all the defined levels of an index, even
-if the they are not actually used. When slicing an index, you may notice this.
+if they are not actually used. When slicing an index, you may notice this.
For example:
.. ipython:: python
@@ -379,7 +379,7 @@ slicers on a single axis.
dfmi.loc(axis=0)[:, :, ['C1', 'C3']]
-Furthermore you can *set* the values using the following methods.
+Furthermore, you can *set* the values using the following methods.
.. ipython:: python
@@ -559,7 +559,7 @@ return a copy of the data rather than a view:
.. _advanced.unsorted:
-Furthermore if you try to index something that is not fully lexsorted, this can raise:
+Furthermore, if you try to index something that is not fully lexsorted, this can raise:
.. code-block:: ipython
@@ -659,7 +659,7 @@ Index Types
We have discussed ``MultiIndex`` in the previous sections pretty extensively. ``DatetimeIndex`` and ``PeriodIndex``
are shown :ref:`here <timeseries.overview>`, and information about
-`TimedeltaIndex`` is found :ref:`here <timedeltas.timedeltas>`.
+``TimedeltaIndex`` is found :ref:`here <timedeltas.timedeltas>`.
In the following sub-sections we will highlight some other index types.
@@ -835,8 +835,8 @@ In non-float indexes, slicing using floats will raise a ``TypeError``.
Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat
-irregular timedelta-like indexing scheme, but the data is recorded as floats. This could for
-example be millisecond offsets.
+irregular timedelta-like indexing scheme, but the data is recorded as floats. This could, for
+example, be millisecond offsets.
.. ipython:: python
| https://api.github.com/repos/pandas-dev/pandas/pulls/22179 | 2018-08-02T20:45:14Z | 2018-08-02T23:26:47Z | 2018-08-02T23:26:46Z | 2018-08-02T23:59:11Z | |
0.23.4 backports 1 | diff --git a/appveyor.yml b/appveyor.yml
index f70fc829ec971..c6199c1493f22 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -20,12 +20,14 @@ environment:
matrix:
- CONDA_ROOT: "C:\\Miniconda3_64"
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
PYTHON_VERSION: "3.6"
PYTHON_ARCH: "64"
CONDA_PY: "36"
CONDA_NPY: "113"
- CONDA_ROOT: "C:\\Miniconda3_64"
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
PYTHON_VERSION: "2.7"
PYTHON_ARCH: "64"
CONDA_PY: "27"
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index a52ba22cf36d2..9f8635743ea6a 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -6,6 +6,11 @@ v0.23.1
This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes
and bug fixes. We recommend that all users upgrade to this version.
+.. warning::
+
+ Starting January 1, 2019, pandas feature releases will support Python 3 only.
+ See :ref:`install.dropping-27` for more.
+
.. contents:: What's new in v0.23.1
:local:
:backlinks: none
diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index bd86576ad8586..77ad860fc4e8e 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -11,6 +11,10 @@ and bug fixes. We recommend that all users upgrade to this version.
Pandas 0.23.2 is first pandas release that's compatible with
Python 3.7 (:issue:`20552`)
+.. warning::
+
+ Starting January 1, 2019, pandas feature releases will support Python 3 only.
+ See :ref:`install.dropping-27` for more.
.. contents:: What's new in v0.23.2
:local:
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt
index a30fbc75f11f8..9a3ad3f61ee49 100644
--- a/doc/source/whatsnew/v0.23.4.txt
+++ b/doc/source/whatsnew/v0.23.4.txt
@@ -1,11 +1,15 @@
.. _whatsnew_0234:
-v0.23.4
--------
+v0.23.4 (August 3, 2018)
+------------------------
This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes
and bug fixes. We recommend that all users upgrade to this version.
+.. warning::
+
+ Starting January 1, 2019, pandas feature releases will support Python 3 only.
+ See :ref:`install.dropping-27` for more.
.. contents:: What's new in v0.23.4
:local:
@@ -16,8 +20,7 @@ and bug fixes. We recommend that all users upgrade to this version.
Fixed Regressions
~~~~~~~~~~~~~~~~~
--
--
+- Python 3.7 with Windows gave all missing values for rolling variance calculations (:issue:`21813`)
.. _whatsnew_0234.bug_fixes:
@@ -28,37 +31,6 @@ Bug Fixes
- Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`)
- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`)
--
-
-**Conversion**
-
--
--
-
-**Indexing**
-
--
--
-
-**I/O**
-
--
--
-
-**Categorical**
-
--
--
-
-**Timezones**
-
--
--
-
-**Timedelta**
-
--
--
**Missing**
diff --git a/pandas/_libs/src/headers/cmath b/pandas/_libs/src/headers/cmath
index d8e2239406cae..2bccf9bb13d77 100644
--- a/pandas/_libs/src/headers/cmath
+++ b/pandas/_libs/src/headers/cmath
@@ -6,6 +6,7 @@
#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include <cmath>
namespace std {
+ __inline int isnan(double x) { return _isnan(x); }
__inline int signbit(double num) { return _copysign(1.0, num) < 0; }
}
#else
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index a77433e5d1115..6954094b46e69 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -14,6 +14,7 @@ cnp.import_array()
cdef extern from "../src/headers/cmath" namespace "std":
+ bint isnan(double) nogil
int signbit(double) nogil
double sqrt(double x) nogil
@@ -654,16 +655,16 @@ cdef inline void add_var(double val, double *nobs, double *mean_x,
double *ssqdm_x) nogil:
""" add a value from the var calc """
cdef double delta
-
- # Not NaN
- if val == val:
- nobs[0] = nobs[0] + 1
-
- # a part of Welford's method for the online variance-calculation
- # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
- delta = val - mean_x[0]
- mean_x[0] = mean_x[0] + delta / nobs[0]
- ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0]
+ # `isnan` instead of equality as fix for GH-21813, msvc 2017 bug
+ if isnan(val):
+ return
+
+ nobs[0] = nobs[0] + 1
+ # a part of Welford's method for the online variance-calculation
+ # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ delta = val - mean_x[0]
+ mean_x[0] = mean_x[0] + delta / nobs[0]
+ ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0]
cdef inline void remove_var(double val, double *nobs, double *mean_x,
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 7dafc9603f96d..3c6b52074763e 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -14,7 +14,7 @@
from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
RangeIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex,
- isna, compat, concat, Timestamp)
+ isna, compat, concat, Timestamp, _np_version_under1p15)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
@@ -2140,6 +2140,10 @@ def test_unimplemented_dtypes_table_columns(self):
# this fails because we have a date in the object block......
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
+ @pytest.mark.skipif(
+ not _np_version_under1p15,
+ reason=("pytables conda build package needs build "
+ "with numpy 1.15: gh-22098"))
def test_calendar_roundtrip_issue(self):
# 8591
| Closes https://github.com/pandas-dev/pandas/issues/22166 | https://api.github.com/repos/pandas-dev/pandas/pulls/22178 | 2018-08-02T20:28:21Z | 2018-08-03T17:17:10Z | 2018-08-03T17:17:10Z | 2018-08-03T17:21:53Z |
0.23.4 whatsnew [ci skip] | diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt
index c17f4ffdd6b8e..9a3ad3f61ee49 100644
--- a/doc/source/whatsnew/v0.23.4.txt
+++ b/doc/source/whatsnew/v0.23.4.txt
@@ -1,7 +1,7 @@
.. _whatsnew_0234:
-v0.23.4
--------
+v0.23.4 (August 3, 2018)
+------------------------
This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes
and bug fixes. We recommend that all users upgrade to this version.
@@ -21,7 +21,6 @@ Fixed Regressions
~~~~~~~~~~~~~~~~~
- Python 3.7 with Windows gave all missing values for rolling variance calculations (:issue:`21813`)
--
.. _whatsnew_0234.bug_fixes:
@@ -32,37 +31,6 @@ Bug Fixes
- Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`)
- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`)
--
-
-**Conversion**
-
--
--
-
-**Indexing**
-
--
--
-
-**I/O**
-
--
--
-
-**Categorical**
-
--
--
-
-**Timezones**
-
--
--
-
-**Timedelta**
-
--
--
**Missing**
| [ci skip] | https://api.github.com/repos/pandas-dev/pandas/pulls/22177 | 2018-08-02T20:26:14Z | 2018-08-02T20:26:41Z | 2018-08-02T20:26:41Z | 2018-08-02T20:26:43Z |
DOC: update the Series.str.contains docstring | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 6349af4d2e0ac..8317473a99f67 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -293,6 +293,8 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
See Also
--------
match : analogous, but stricter, relying on re.match instead of re.search
+ Series.str.startswith : Test if the start of each string element matches a pattern.
+ Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
| updated see also section with two more examples | https://api.github.com/repos/pandas-dev/pandas/pulls/22176 | 2018-08-02T20:04:46Z | 2018-08-02T20:33:02Z | 2018-08-02T20:33:02Z | 2018-08-02T23:28:05Z |
DOC: updating the Series.str.cat docstring | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 6349af4d2e0ac..0c0ddad5fb6d9 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -2063,6 +2063,7 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
See Also
--------
split : Split each string in the Series/Index
+ join : Join lists contained as elements in the Series/Index
Examples
--------
| - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/22175 | 2018-08-02T19:14:43Z | 2018-08-02T20:35:08Z | 2018-08-02T20:35:08Z | 2018-08-02T20:35:10Z |
DOC: update the Series.str.join docstring | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 6349af4d2e0ac..7f630e3a3d8a4 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1105,10 +1105,17 @@ def str_join(arr, sep):
Returns
-------
Series/Index: object
+ The list entries concatenated by intervening occurrences of the
+ delimiter.
+
+ Raises
+ -------
+ AttributeError
+ If the supplied Series contains neither strings nor lists.
Notes
-----
- If any of the lists does not contain string objects the result of the join
+ If any of the list items is not a string object, the result of the join
will be `NaN`.
See Also
@@ -1118,13 +1125,12 @@ def str_join(arr, sep):
Examples
--------
-
Example with a list that contains non-string elements.
>>> s = pd.Series([['lion', 'elephant', 'zebra'],
... [1.1, 2.2, 3.3],
... ['cat', np.nan, 'dog'],
- ... ['cow', 4.5, 'goat']
+ ... ['cow', 4.5, 'goat'],
... ['duck', ['swan', 'fish'], 'guppy']])
>>> s
0 [lion, elephant, zebra]
@@ -1134,8 +1140,8 @@ def str_join(arr, sep):
4 [duck, [swan, fish], guppy]
dtype: object
- Join all lists using an '-', the lists containing object(s) of types other
- than str will become a NaN.
+ Join all lists using a '-'. The lists containing object(s) of types other
+ than str will produce a NaN.
>>> s.str.join('-')
0 lion-elephant-zebra
| Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR):
- [x] PR title is "DOC: update the docstring"
- [x] The validation script passes: scripts/validate_docstrings.py <your-function-or-method>
- [x] The PEP8 style check passes: git diff upstream/master -u -- "*.py" | flake8 --diff
- [x] The html version looks good: python doc/make.py --single <your-function-or-method>
- [x] It has been proofread on language by another sprint participant | https://api.github.com/repos/pandas-dev/pandas/pulls/22174 | 2018-08-02T19:03:45Z | 2018-08-08T12:30:55Z | 2018-08-08T12:30:55Z | 2018-08-08T14:39:51Z |
Fix Series v Index bool ops | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 9e2c20c78f489..0a82be89edeb2 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -809,5 +809,3 @@ Other
- :meth:`~pandas.io.formats.style.Styler.bar` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` and setting clipping range with ``vmin`` and ``vmax`` (:issue:`21548` and :issue:`21526`). ``NaN`` values are also handled properly.
- Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`)
-
--
--
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index a7fc2839ea101..70fe7de0a973e 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1525,23 +1525,22 @@ def _bool_method_SERIES(cls, op, special):
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
+ op_name = _get_op_name(op, special)
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
- if isinstance(y, list):
- y = construct_1d_object_array_from_listlike(y)
-
- if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
- if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
- result = op(x, y) # when would this be hit?
- else:
- x = ensure_object(x)
- y = ensure_object(y)
- result = libops.vec_binop(x, y, op)
+ assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
+ if isinstance(y, np.ndarray):
+ # bool-bool dtype operations should be OK, should not get here
+ assert not (is_bool_dtype(x) and is_bool_dtype(y))
+ x = ensure_object(x)
+ y = ensure_object(y)
+ result = libops.vec_binop(x, y, op)
else:
# let null fall thru
+ assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
@@ -1561,33 +1560,42 @@ def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
+ res_name = get_op_result_name(self, other)
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
- elif isinstance(other, ABCSeries):
- name = get_op_result_name(self, other)
+ elif isinstance(other, (ABCSeries, ABCIndexClass)):
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
- filler = (fill_int if is_self_int_dtype and is_other_int_dtype
- else fill_bool)
-
- res_values = na_op(self.values, other.values)
- unfilled = self._constructor(res_values,
- index=self.index, name=name)
- return filler(unfilled)
+ ovalues = other.values
+ finalizer = lambda x: x
else:
# scalars, list, tuple, np.array
- filler = (fill_int if is_self_int_dtype and
- is_integer_dtype(np.asarray(other)) else fill_bool)
-
- res_values = na_op(self.values, other)
- unfilled = self._constructor(res_values, index=self.index)
- return filler(unfilled).__finalize__(self)
+ is_other_int_dtype = is_integer_dtype(np.asarray(other))
+ if is_list_like(other) and not isinstance(other, np.ndarray):
+ # TODO: Can we do this before the is_integer_dtype check?
+ # could the is_integer_dtype check be checking the wrong
+ # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?
+ other = construct_1d_object_array_from_listlike(other)
+
+ ovalues = other
+ finalizer = lambda x: x.__finalize__(self)
+
+ # For int vs int `^`, `|`, `&` are bitwise operators and return
+ # integer dtypes. Otherwise these are boolean ops
+ filler = (fill_int if is_self_int_dtype and is_other_int_dtype
+ else fill_bool)
+ res_values = na_op(self.values, ovalues)
+ unfilled = self._constructor(res_values,
+ index=self.index, name=res_name)
+ filled = filler(unfilled)
+ return finalizer(filled)
+ wrapper.__name__ = op_name
return wrapper
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 615f0c9247bd8..601e251d45b4b 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -14,6 +14,7 @@
NaT, date_range, timedelta_range, Categorical)
from pandas.core.indexes.datetimes import Timestamp
import pandas.core.nanops as nanops
+from pandas.core import ops
from pandas.compat import range
from pandas import compat
@@ -425,30 +426,6 @@ def test_comparison_flex_alignment_fill(self):
exp = pd.Series([True, True, False, False], index=list('abcd'))
assert_series_equal(left.gt(right, fill_value=0), exp)
- def test_logical_ops_with_index(self):
- # GH22092
- ser = Series([True, True, False, False])
- idx1 = Index([True, False, True, False])
- idx2 = Index([1, 0, 1, 0])
-
- expected = Series([True, False, False, False])
- result1 = ser & idx1
- assert_series_equal(result1, expected)
- result2 = ser & idx2
- assert_series_equal(result2, expected)
-
- expected = Series([True, True, True, False])
- result1 = ser | idx1
- assert_series_equal(result1, expected)
- result2 = ser | idx2
- assert_series_equal(result2, expected)
-
- expected = Series([False, True, True, False])
- result1 = ser ^ idx1
- assert_series_equal(result1, expected)
- result2 = ser ^ idx2
- assert_series_equal(result2, expected)
-
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
@@ -627,6 +604,42 @@ def test_ops_datetimelike_align(self):
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
+ @pytest.mark.parametrize('op', [
+ operator.and_,
+ operator.or_,
+ operator.xor,
+ pytest.param(ops.rand_,
+ marks=pytest.mark.xfail(reason="GH#22092 Index "
+ "implementation returns "
+ "Index",
+ raises=AssertionError,
+ strict=True)),
+ pytest.param(ops.ror_,
+ marks=pytest.mark.xfail(reason="GH#22092 Index "
+ "implementation raises",
+ raises=ValueError, strict=True)),
+ pytest.param(ops.rxor,
+ marks=pytest.mark.xfail(reason="GH#22092 Index "
+ "implementation raises",
+ raises=TypeError, strict=True))
+ ])
+ def test_bool_ops_with_index(self, op):
+ # GH#22092, GH#19792
+ ser = Series([True, True, False, False])
+ idx1 = Index([True, False, True, False])
+ idx2 = Index([1, 0, 1, 0])
+
+ expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
+
+ result = op(ser, idx1)
+ assert_series_equal(result, expected)
+
+ expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
+ dtype=bool)
+
+ result = op(ser, idx2)
+ assert_series_equal(result, expected)
+
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
| Series boolean ops tests need some serious cleanup and fleshing out. This gets separates out one fix before going in to clean up the tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/22173 | 2018-08-02T17:27:29Z | 2018-09-23T13:35:16Z | 2018-09-23T13:35:16Z | 2018-09-25T23:48:57Z |
BUG: fixed .str.contains(..., na=False) for categorical series | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index d6f9bb66e1e28..89404fc4305f8 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1272,7 +1272,7 @@ Strings
- Bug in :meth:`Index.str.partition` was not nan-safe (:issue:`23558`).
- Bug in :meth:`Index.str.split` was not nan-safe (:issue:`23677`).
--
+- Bug :func:`Series.str.contains` not respecting the ``na`` argument for a ``Categorical`` dtype ``Series`` (:issue:`22158`)
Interval
^^^^^^^^
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 58ce562d03d1d..0d89a97b69046 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1857,7 +1857,7 @@ def __iter__(self):
g = self.get(i)
def _wrap_result(self, result, use_codes=True,
- name=None, expand=None):
+ name=None, expand=None, fill_value=np.nan):
from pandas.core.index import Index, MultiIndex
@@ -1867,7 +1867,8 @@ def _wrap_result(self, result, use_codes=True,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
- result = take_1d(result, self._orig.cat.codes)
+ result = take_1d(result, self._orig.cat.codes,
+ fill_value=fill_value)
if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):
return result
@@ -2515,12 +2516,12 @@ def join(self, sep):
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(self._parent, pat, case=case, flags=flags, na=na,
regex=regex)
- return self._wrap_result(result)
+ return self._wrap_result(result, fill_value=na)
@copy(str_match)
def match(self, pat, case=True, flags=0, na=np.nan):
result = str_match(self._parent, pat, case=case, flags=flags, na=na)
- return self._wrap_result(result)
+ return self._wrap_result(result, fill_value=na)
@copy(str_replace)
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 42f0cebea83a0..2ff63b67d1202 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -512,10 +512,28 @@ def test_contains(self):
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
- # na
- values = Series(['om', 'foo', np.nan])
- res = values.str.contains('foo', na="foo")
- assert res.loc[2] == "foo"
+ def test_contains_for_object_category(self):
+ # gh 22158
+
+ # na for category
+ values = Series(["a", "b", "c", "a", np.nan], dtype="category")
+ result = values.str.contains('a', na=True)
+ expected = Series([True, False, False, True, True])
+ tm.assert_series_equal(result, expected)
+
+ result = values.str.contains('a', na=False)
+ expected = Series([True, False, False, True, False])
+ tm.assert_series_equal(result, expected)
+
+ # na for objects
+ values = Series(["a", "b", "c", "a", np.nan])
+ result = values.str.contains('a', na=True)
+ expected = Series([True, False, False, True, True])
+ tm.assert_series_equal(result, expected)
+
+ result = values.str.contains('a', na=False)
+ expected = Series([True, False, False, True, False])
+ tm.assert_series_equal(result, expected)
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
@@ -2875,7 +2893,7 @@ def test_get_complex_nested(self, to_type):
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
- def test_more_contains(self):
+ def test_contains_moar(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
@@ -2925,7 +2943,7 @@ def test_contains_nan(self):
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
assert_series_equal(result, expected)
- def test_more_replace(self):
+ def test_replace_moar(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA, 'CABA',
'dog', 'cat'])
| - [x] closes #22158
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22170 | 2018-08-02T14:02:13Z | 2018-11-20T01:13:57Z | 2018-11-20T01:13:57Z | 2018-11-20T01:14:01Z |
BUG: Fix using "inf"/"-inf" in na_values for csv with int index column | diff --git a/doc/source/whatsnew/v0.23.5.txt b/doc/source/whatsnew/v0.23.5.txt
index 916a246355b5f..f298dac8ae6a7 100644
--- a/doc/source/whatsnew/v0.23.5.txt
+++ b/doc/source/whatsnew/v0.23.5.txt
@@ -40,3 +40,7 @@ Bug Fixes
-
-
+
+**I/O**
+
+- Bug in :func:`read_csv` that caused it to raise ``OverflowError`` when trying to use 'inf' as ``na_value`` with integer index column (:issue:`17128`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 49705cb6d9ad2..4bf62b021cddc 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -95,7 +95,7 @@ def _ensure_data(values, dtype=None):
values = ensure_float64(values)
return values, 'float64', 'float64'
- except (TypeError, ValueError):
+ except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), 'object', 'object'
@@ -429,7 +429,7 @@ def isin(comps, values):
values = values.astype('int64', copy=False)
comps = comps.astype('int64', copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
- except (TypeError, ValueError):
+ except (TypeError, ValueError, OverflowError):
values = values.astype(object)
comps = comps.astype(object)
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index 9c68770e06b02..880ab707cfd07 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -369,3 +369,14 @@ def test_no_na_filter_on_index(self):
expected = DataFrame({"a": [1, 4], "c": [3, 6]},
index=Index([np.nan, 5.0], name="b"))
tm.assert_frame_equal(out, expected)
+
+ def test_inf_na_values_with_int_index(self):
+ # see gh-17128
+ data = "idx,col1,col2\n1,3,4\n2,inf,-inf"
+
+ # Don't fail with OverflowError with infs and integer index column
+ out = self.read_csv(StringIO(data), index_col=[0],
+ na_values=['inf', '-inf'])
+ expected = DataFrame({"col1": [3, np.nan], "col2": [4, np.nan]},
+ index=Index([1, 2], name="idx"))
+ tm.assert_frame_equal(out, expected)
| - [x] closes #17128
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The issue happens when you try to use 'inf' or '-inf' as part of na_values in read_csv.
Code snippet to reproduce:
```
from StringIO import StringIO
import pandas as pd
dataset = StringIO('''index,col1,col2,col3
1,6,10,14
2,7,11,15
3,8,12,16
4,9,13,17
5,inf,-inf,bla
''')
na_values = ['inf', '-inf', 'bla']
df = pd.read_csv(dataset, na_values=na_values, index_col='index')
print df
print df.dtypes
```
Without fix:
```
Traceback (most recent call last):
File "/home/modintsov/workspace/DataRobot/playground.py", line 39, in <module>
df = pd.read_csv(dataset, na_values=na_values, index_col='index')
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 678, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 446, in _read
data = parser.read(nrows)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1036, in read
ret = self._engine.read(nrows)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1922, in read
index, names = self._make_index(data, alldata, names)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1426, in _make_index
index = self._agg_index(index)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1520, in _agg_index
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1600, in _infer_types
mask = algorithms.isin(values, list(na_values))
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/core/algorithms.py", line 418, in isin
values, _, _ = _ensure_data(values, dtype=dtype)
File "/home/modintsov/.virtualenvs/dev/local/lib/python2.7/site-packages/pandas/core/algorithms.py", line 82, in _ensure_data
return _ensure_int64(values), 'int64', 'int64'
File "pandas/_libs/algos_common_helper.pxi", line 3227, in pandas._libs.algos.ensure_int64
File "pandas/_libs/algos_common_helper.pxi", line 3232, in pandas._libs.algos.ensure_int64
OverflowError: cannot convert float infinity to integer
```
With fix (as expected):
```
col1 col2 col3
index
1 6.0 10.0 14.0
2 7.0 11.0 15.0
3 8.0 12.0 16.0
4 9.0 13.0 17.0
5 NaN NaN NaN
col1 float64
col2 float64
col3 float64
dtype: object
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/22169 | 2018-08-02T11:56:34Z | 2018-08-09T10:38:01Z | 2018-08-09T10:38:01Z | 2018-08-09T11:02:46Z |
Fixed py36-only syntax [ci skip] | diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 07634811370c7..3cf9a32dab398 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -141,7 +141,7 @@ class GetDummies(object):
def setup(self):
categories = list(string.ascii_letters[:12])
- s = pd.Series(np.random.choice(categories, size=1_000_000),
+ s = pd.Series(np.random.choice(categories, size=1000000),
dtype=pd.api.types.CategoricalDtype(categories))
self.s = s
| Closes https://github.com/pandas-dev/pandas/issues/22162
[ci skip] | https://api.github.com/repos/pandas-dev/pandas/pulls/22167 | 2018-08-02T10:57:16Z | 2018-08-02T10:57:35Z | 2018-08-02T10:57:35Z | 2018-08-02T10:57:39Z |
TST: Check DatetimeIndex.drop on DST boundary | diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 67eb81336f648..b83645414c8ff 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -738,6 +738,28 @@ def test_dti_drop_dont_lose_tz(self):
assert ind.tz is not None
+ def test_drop_dst_boundary(self):
+ # see gh-18031
+ tz = "Europe/Brussels"
+ freq = "15min"
+
+ start = pd.Timestamp("201710290100", tz=tz)
+ end = pd.Timestamp("201710290300", tz=tz)
+ index = pd.date_range(start=start, end=end, freq=freq)
+
+ expected = DatetimeIndex(["201710290115", "201710290130",
+ "201710290145", "201710290200",
+ "201710290215", "201710290230",
+ "201710290245", "201710290200",
+ "201710290215", "201710290230",
+ "201710290245", "201710290300"],
+ tz=tz, freq=freq,
+ ambiguous=[True, True, True, True,
+ True, True, True, False,
+ False, False, False, False])
+ result = index.drop(index[0])
+ tm.assert_index_equal(result, expected)
+
def test_date_range_localize(self):
rng = date_range('3/11/2012 03:00', periods=15, freq='H',
tz='US/Eastern')
| Title is self-explanatory.
Closes #18031.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22165 | 2018-08-02T07:50:57Z | 2018-08-03T15:09:50Z | 2018-08-03T15:09:50Z | 2018-08-03T17:01:23Z |
CLN: Use public method to capture UTC offsets | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 76e3d6e92d31e..14fd3f731480f 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -635,24 +635,12 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
# If the dateutil parser returned tzinfo, capture it
# to check if all arguments have the same tzinfo
- tz = py_dt.tzinfo
+ tz = py_dt.utcoffset()
if tz is not None:
seen_datetime_offset = 1
- if tz == dateutil_utc():
- # dateutil.tz.tzutc has no offset-like attribute
- # Just add the 0 offset explicitly
- out_tzoffset_vals.add(0)
- elif tz == tzlocal():
- # is comparison fails unlike other dateutil.tz
- # objects. Also, dateutil.tz.tzlocal has no
- # _offset attribute like tzoffset
- offset_seconds = tz._dst_offset.total_seconds()
- out_tzoffset_vals.add(offset_seconds)
- else:
- # dateutil.tz.tzoffset objects cannot be hashed
- # store the total_seconds() instead
- offset_seconds = tz._offset.total_seconds()
- out_tzoffset_vals.add(offset_seconds)
+ # dateutil timezone objects cannot be hashed, so store
+ # the UTC offsets in seconds instead
+ out_tzoffset_vals.add(tz.total_seconds())
else:
# Add a marker for naive string, to track if we are
# parsing mixed naive and aware strings
| xref https://github.com/pandas-dev/pandas/pull/21822#discussion_r207023121
Per @pganssle's suggestion, avoiding using private dateutil methods to access the timezone offset and more succinctly using `utcoffset` instead
| https://api.github.com/repos/pandas-dev/pandas/pulls/22164 | 2018-08-02T05:00:40Z | 2018-08-02T10:29:49Z | 2018-08-02T10:29:48Z | 2018-08-02T17:02:06Z |
dispatch scalar DataFrame ops to Series | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index f26d3d76592d0..a09e32d8a315d 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -216,7 +216,7 @@ New Behavior:
idx = pd.interval_range(0, 4)
idx.values
-This mirrors ``CateogricalIndex.values``, which returns a ``Categorical``.
+This mirrors ``CategoricalIndex.values``, which returns a ``Categorical``.
For situations where you need an ``ndarray`` of ``Interval`` objects, use
:meth:`numpy.asarray` or ``idx.astype(object)``.
@@ -406,6 +406,34 @@ Previous Behavior:
In [3]: pi - pi[0]
Out[3]: Int64Index([0, 1, 2], dtype='int64')
+
+.. _whatsnew_0240.api.timedelta64_subtract_nan
+
+Addition/Subtraction of ``NaN`` from :class:``DataFrame``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Adding or subtracting ``NaN`` from a :class:`DataFrame` column with
+`timedelta64[ns]` dtype will now raise a ``TypeError`` instead of returning
+all-``NaT``. This is for compatibility with ``TimedeltaIndex`` and
+``Series`` behavior (:issue:`22163`)
+
+.. ipython:: python
+
+ df = pd.DataFrame([pd.Timedelta(days=1)])
+ df - np.nan
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [4]: df = pd.DataFrame([pd.Timedelta(days=1)])
+
+ In [5]: df - np.nan
+ Out[5]:
+ 0
+ 0 NaT
+
+
.. _whatsnew_0240.api.extension:
ExtensionType Changes
@@ -538,6 +566,16 @@ Datetimelike
- Bug in :class:`DatetimeIndex` comparisons where string comparisons incorrectly raises ``TypeError`` (:issue:`22074`)
- Bug in :class:`DatetimeIndex` comparisons when comparing against ``timedelta64[ns]`` dtyped arrays; in some cases ``TypeError`` was incorrectly raised, in others it incorrectly failed to raise (:issue:`22074`)
- Bug in :class:`DatetimeIndex` comparisons when comparing against object-dtyped arrays (:issue:`22074`)
+- Bug in :class:`DataFrame` with ``datetime64[ns]`` dtype addition and subtraction with ``Timedelta``-like objects (:issue:`22005`,:issue:`22163`)
+- Bug in :class:`DataFrame` with ``datetime64[ns]`` dtype addition and subtraction with ``DateOffset`` objects returning an ``object`` dtype instead of ``datetime64[ns]`` dtype (:issue:`21610`,:issue:`22163`)
+- Bug in :class:`DataFrame` with ``datetime64[ns]`` dtype comparing against ``NaT`` incorrectly (:issue:`22242`,:issue:`22163`)
+- Bug in :class:`DataFrame` with ``datetime64[ns]`` dtype subtracting ``Timestamp``-like object incorrectly returned ``datetime64[ns]`` dtype instead of ``timedelta64[ns]`` dtype (:issue:`8554`,:issue:`22163`)
+- Bug in :class:`DataFrame` with ``datetime64[ns]`` dtype subtracting ``np.datetime64`` object with non-nanosecond unit failing to convert to nanoseconds (:issue:`18874`,:issue:`22163`)
+- Bug in :class:`DataFrame` comparisons against ``Timestamp``-like objects failing to raise ``TypeError`` for inequality checks with mismatched types (:issue:`8932`,:issue:`22163`)
+- Bug in :class:`DataFrame` with mixed dtypes including ``datetime64[ns]`` incorrectly raising ``TypeError`` on equality comparisons (:issue:`13128`,:issue:`22163`)
+- Bug in :meth:`DataFrame.eq` comparison against ``NaT`` incorrectly returning ``True`` or ``NaN`` (:issue:`15697`,:issue:`22163`)
+- Bug in :class:`DataFrame` with ``timedelta64[ns]`` dtype division by ``Timedelta``-like scalar incorrectly returning ``timedelta64[ns]`` dtype instead of ``float64`` dtype (:issue:`20088`,:issue:`22163`)
+-
Timedelta
^^^^^^^^^
@@ -585,6 +623,7 @@ Numeric
when supplied with a list of functions and ``axis=1`` (e.g. ``df.apply(['sum', 'mean'], axis=1)``),
a ``TypeError`` was wrongly raised. For all three methods such calculation are now done correctly. (:issue:`16679`).
- Bug in :class:`Series` comparison against datetime-like scalars and arrays (:issue:`22074`)
+- Bug in :class:`DataFrame` multiplication between boolean dtype and integer returning ``object`` dtype instead of integer dtype (:issue:`22047`,:issue:`22163`)
-
Strings
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 638129291b495..b97e5465d7e1c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4936,6 +4936,14 @@ def _combine_match_columns(self, other, func, level=None, try_cast=True):
return self._constructor(new_data)
def _combine_const(self, other, func, errors='raise', try_cast=True):
+ if lib.is_scalar(other) or np.ndim(other) == 0:
+ new_data = {i: func(self.iloc[:, i], other)
+ for i, col in enumerate(self.columns)}
+
+ result = self._constructor(new_data, index=self.index, copy=False)
+ result.columns = self.columns
+ return result
+
new_data = self._data.eval(func=func, other=other,
errors=errors,
try_cast=try_cast)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index dc139a8e14f66..10418ccbb1f64 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1350,7 +1350,7 @@ def na_op(x, y):
with np.errstate(all='ignore'):
result = method(y)
if result is NotImplemented:
- raise TypeError("invalid type comparison")
+ return invalid_comparison(x, y, op)
else:
result = op(x, y)
@@ -1366,6 +1366,10 @@ def wrapper(self, other, axis=None):
res_name = get_op_result_name(self, other)
+ if isinstance(other, list):
+ # TODO: same for tuples?
+ other = np.asarray(other)
+
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
@@ -1459,8 +1463,6 @@ def wrapper(self, other, axis=None):
else:
values = self.get_values()
- if isinstance(other, list):
- other = np.asarray(other)
with np.errstate(all='ignore'):
res = na_op(values, other)
@@ -1741,7 +1743,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
if fill_value is not None:
self = self.fillna(fill_value)
- return self._combine_const(other, na_op, try_cast=True)
+ pass_op = op if lib.is_scalar(other) else na_op
+ return self._combine_const(other, pass_op, try_cast=True)
f.__name__ = op_name
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index a27199b58cf5e..879a4e1b4af1a 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -63,6 +63,15 @@ def test_tz_aware_scalar_comparison(self, timestamps):
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
+ def test_dt64_nat_comparison(self):
+ # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
+ ts = pd.Timestamp.now()
+ df = pd.DataFrame([ts, pd.NaT])
+ expected = pd.DataFrame([True, False])
+
+ result = df == ts
+ tm.assert_frame_equal(result, expected)
+
class TestDatetime64SeriesComparison(object):
# TODO: moved from tests.series.test_operators; needs cleanup
@@ -640,10 +649,22 @@ def test_dti_cmp_object_dtype(self):
# Arithmetic
class TestFrameArithmetic(object):
+ def test_dt64arr_sub_dtscalar(self, box):
+ # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
+ idx = pd.date_range('2013-01-01', periods=3)
+ idx = tm.box_expected(idx, box)
+
+ ts = pd.Timestamp('2013-01-01')
+ # TODO: parametrize over scalar types
+
+ expected = pd.TimedeltaIndex(['0 Days', '1 Day', '2 Days'])
+ expected = tm.box_expected(expected, box)
+
+ result = idx - ts
+ tm.assert_equal(result, expected)
- @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
- strict=True)
def test_df_sub_datetime64_not_ns(self):
+ # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
@@ -992,9 +1013,11 @@ def test_dti_add_sub_float(self, op, other):
with pytest.raises(TypeError):
op(dti, other)
- def test_dti_add_timestamp_raises(self):
+ def test_dti_add_timestamp_raises(self, box):
+ # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
- msg = "cannot add DatetimeIndex and Timestamp"
+ idx = tm.box_expected(idx, box)
+ msg = "cannot add"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
@@ -1090,13 +1113,17 @@ def test_dti_add_intarray_no_freq(self, box):
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
- def test_dti_add_timedeltalike(self, tz_naive_fixture, delta):
+ def test_dti_add_timedeltalike(self, tz_naive_fixture, delta, box):
+ # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
+ rng = tm.box_expected(rng, box)
+
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
- tm.assert_index_equal(result, expected)
+ expected = tm.box_expected(expected, box)
+ tm.assert_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz_naive_fixture, delta):
tz = tz_naive_fixture
@@ -1662,14 +1689,8 @@ def test_dti_with_offset_series(self, tz_naive_fixture, names):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
- @pytest.mark.parametrize('box', [
- pd.Index,
- pd.Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object dtype",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_dti_add_offset_tzaware(self, tz_aware_fixture, box):
+ # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
timezone = tz_aware_fixture
if timezone == 'US/Pacific':
dates = date_range('2012-11-01', periods=3, tz=timezone)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 71742d428ea3e..85a0a8dffc55f 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -58,13 +58,6 @@ def test_ops_series(self):
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="block.eval incorrect",
- strict=True))
- ])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
@@ -79,7 +72,7 @@ def test_ops_series(self):
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
- if (box is Series and
+ if (box in [Series, pd.DataFrame] and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 9a17dc580ff6c..f142f770a0c54 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -17,6 +17,53 @@
# Comparisons
class TestFrameComparisons(object):
+ def test_flex_comparison_nat(self):
+ # GH#15697, GH#22163 df.eq(pd.NaT) should behave like df == pd.NaT,
+ # and _definitely_ not be NaN
+ df = pd.DataFrame([pd.NaT])
+
+ result = df == pd.NaT
+ # result.iloc[0, 0] is a np.bool_ object
+ assert result.iloc[0, 0].item() is False
+
+ result = df.eq(pd.NaT)
+ assert result.iloc[0, 0].item() is False
+
+ result = df != pd.NaT
+ assert result.iloc[0, 0].item() is True
+
+ result = df.ne(pd.NaT)
+ assert result.iloc[0, 0].item() is True
+
+ def test_mixed_comparison(self):
+ # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
+ # not raise TypeError
+ # (this appears to be fixed before #22163, not sure when)
+ df = pd.DataFrame([['1989-08-01', 1], ['1989-08-01', 2]])
+ other = pd.DataFrame([['a', 'b'], ['c', 'd']])
+
+ result = df == other
+ assert not result.any().any()
+
+ result = df != other
+ assert result.all().all()
+
+ def test_df_numeric_cmp_dt64_raises(self):
+ # GH#8932, GH#22163
+ ts = pd.Timestamp.now()
+ df = pd.DataFrame({'x': range(5)})
+ with pytest.raises(TypeError):
+ df > ts
+ with pytest.raises(TypeError):
+ df < ts
+ with pytest.raises(TypeError):
+ ts < df
+ with pytest.raises(TypeError):
+ ts > df
+
+ assert not (df == ts).any().any()
+ assert (df != ts).all().all()
+
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
@@ -32,8 +79,8 @@ def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
- with pytest.raises(TypeError):
- df.__eq__(None)
+ result = df.__eq__(None)
+ assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
@@ -251,3 +298,20 @@ def test_arith_flex_zero_len_raises(self):
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df_len0.sub(df['A'], axis=None, fill_value=3)
+
+
+class TestFrameArithmetic(object):
+ def test_df_bool_mul_int(self):
+ # GH#22047, GH#22163 multiplication by 1 should result in int dtype,
+ # not object dtype
+ df = pd.DataFrame([[False, True], [False, False]])
+ result = df * 1
+
+ # On appveyor this comes back as np.int32 instead of np.int64,
+ # so we check dtype.kind instead of just dtype
+ kinds = result.dtypes.apply(lambda x: x.kind)
+ assert (kinds == 'i').all()
+
+ result = 1 * df
+ kinds = result.dtypes.apply(lambda x: x.kind)
+ assert (kinds == 'i').all()
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index d885df76967b8..6a4cf1ffc6071 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -273,6 +273,8 @@ def test_getitem_boolean(self):
# test df[df > 0]
for df in [self.tsframe, self.mixed_frame,
self.mixed_float, self.mixed_int]:
+ if compat.PY3 and df is self.mixed_frame:
+ continue
data = df._get_numeric_data()
bif = df[df > 0]
@@ -2468,8 +2470,11 @@ def test_boolean_indexing_mixed(self):
assert_frame_equal(df2, expected)
df['foo'] = 'test'
- with tm.assert_raises_regex(TypeError, 'boolean setting '
- 'on mixed-type'):
+ msg = ("boolean setting on mixed-type|"
+ "not supported between|"
+ "unorderable types")
+ with tm.assert_raises_regex(TypeError, msg):
+ # TODO: This message should be the same in PY2/PY3
df[df > 0.3] = 1
def test_where(self):
@@ -2502,6 +2507,10 @@ def _check_get(df, cond, check_dtypes=True):
# check getting
for df in [default_frame, self.mixed_frame,
self.mixed_float, self.mixed_int]:
+ if compat.PY3 and df is self.mixed_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ continue
cond = df > 0
_check_get(df, cond)
@@ -2549,6 +2558,10 @@ def _check_align(df, cond, other, check_dtypes=True):
assert (rs.dtypes == df.dtypes).all()
for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
+ if compat.PY3 and df is self.mixed_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ continue
# other is a frame
cond = (df > 0)[1:]
@@ -2594,6 +2607,10 @@ def _check_set(df, cond, check_dtypes=True):
for df in [default_frame, self.mixed_frame, self.mixed_float,
self.mixed_int]:
+ if compat.PY3 and df is self.mixed_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ continue
cond = df > 0
_check_set(df, cond)
@@ -2759,9 +2776,14 @@ def test_where_datetime(self):
C=np.random.randn(5)))
stamp = datetime(2013, 1, 3)
- result = df[df > stamp]
+ with pytest.raises(TypeError):
+ df > stamp
+
+ result = df[df.iloc[:, :-1] > stamp]
+
expected = df.copy()
expected.loc[[0, 1], 'A'] = np.nan
+ expected.loc[:, 'C'] = np.nan
assert_frame_equal(result, expected)
def test_where_none(self):
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 2fc59c5003a4d..da4424b1ae626 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -150,10 +150,15 @@ def test_timestamp_compare(self):
right_f = getattr(operator, right)
# no nats
- expected = left_f(df, Timestamp('20010109'))
- result = right_f(Timestamp('20010109'), df)
- assert_frame_equal(result, expected)
-
+ if left in ['eq', 'ne']:
+ expected = left_f(df, Timestamp('20010109'))
+ result = right_f(Timestamp('20010109'), df)
+ assert_frame_equal(result, expected)
+ else:
+ with pytest.raises(TypeError):
+ left_f(df, Timestamp('20010109'))
+ with pytest.raises(TypeError):
+ right_f(Timestamp('20010109'), df)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 21d895fa59021..f3bc523ca525e 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -473,7 +473,6 @@ def test_timedelta_ops_with_missing_values(self):
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
- NA = np.nan
actual = scalar1 + scalar1
assert actual == scalar2
@@ -541,10 +540,10 @@ def test_timedelta_ops_with_missing_values(self):
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
- actual = df1 + NA
- tm.assert_frame_equal(actual, dfn)
- actual = df1 - NA
- tm.assert_frame_equal(actual, dfn)
+ with pytest.raises(TypeError):
+ df1 + np.nan
+ with pytest.raises(TypeError):
+ df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 0b06775326ab1..34f22513106ba 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1235,16 +1235,31 @@ def test_binop_other(self, op, value, dtype):
(operator.truediv, 'bool'),
(operator.mod, 'i8'),
(operator.mod, 'complex128'),
- (operator.mod, '<M8[ns]'),
- (operator.mod, '<m8[ns]'),
(operator.pow, 'bool')}
if (op, dtype) in skip:
pytest.skip("Invalid combination {},{}".format(op, dtype))
+
e = DummyElement(value, dtype)
s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
- result = op(s, e).dtypes
- expected = op(s, value).dtypes
- assert_series_equal(result, expected)
+
+ invalid = {(operator.pow, '<M8[ns]'),
+ (operator.mod, '<M8[ns]'),
+ (operator.truediv, '<M8[ns]'),
+ (operator.mul, '<M8[ns]'),
+ (operator.add, '<M8[ns]'),
+ (operator.pow, '<m8[ns]'),
+ (operator.mod, '<m8[ns]'),
+ (operator.mul, '<m8[ns]')}
+
+ if (op, dtype) in invalid:
+ with pytest.raises(TypeError):
+ result = op(s, e.value)
+ else:
+ # FIXME: Since dispatching to Series, this test no longer
+ # asserts anything meaningful
+ result = op(s, e.value).dtypes
+ expected = op(s, value).dtypes
+ assert_series_equal(result, expected)
@pytest.mark.parametrize('typestr, holder', [
diff --git a/pandas/tests/test_arithmetic.py b/pandas/tests/test_arithmetic.py
index 30d69e243c446..ae067faba7929 100644
--- a/pandas/tests/test_arithmetic.py
+++ b/pandas/tests/test_arithmetic.py
@@ -12,7 +12,6 @@
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
-from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex,
@@ -429,15 +428,6 @@ def test_td64arr_add_sub_float(self, box, op, other):
with pytest.raises(TypeError):
op(tdi, other)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Tries to cast df to "
- "Period",
- strict=True,
- raises=IncompatibleFrequency))
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
@@ -481,17 +471,12 @@ def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
- msg = "cannot subtract a datelike from|Could not operate"
+ msg = ("cannot subtract a datelike from|"
+ "Could not operate|"
+ "cannot perform operation")
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object dtype",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
@@ -502,13 +487,6 @@ def test_td64arr_add_timestamp(self, box):
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object dtype",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
@@ -520,15 +498,6 @@ def test_td64_radd_timestamp(self, box):
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object dtype "
- "instead of "
- "datetime64[ns]",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
@@ -685,7 +654,8 @@ def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
- pytest.xfail(reason="DataFrame to broadcast incorrectly")
+ pytest.xfail(reason="reversed ops return incorrect answers "
+ "instead of raising.")
tdser = tm.box_expected(tdser, box)
err = TypeError
@@ -1151,8 +1121,7 @@ def test_timedelta64_conversions(self, m, unit):
# Multiplication
# organized with scalar others first, then array-like
- def test_td64arr_mul_int(self, box_df_fail):
- box = box_df_fail # DataFrame op returns object instead of m8[ns]
+ def test_td64arr_mul_int(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
@@ -1171,8 +1140,7 @@ def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
with pytest.raises(TypeError):
rng * delta
- def test_tdi_mul_int_array_zerodim(self, box_df_fail):
- box = box_df_fail # DataFrame op returns object dtype
+ def test_tdi_mul_int_array_zerodim(self, box):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
@@ -1248,24 +1216,22 @@ def test_tdi_rmul_arraylike(self, other, box_df_fail):
# ------------------------------------------------------------------
# __div__
- def test_td64arr_div_nat_invalid(self, box_df_fail):
+ def test_td64arr_div_nat_invalid(self, box):
# don't allow division by NaT (maybe could in the future)
- box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
- def test_td64arr_div_int(self, box_df_fail):
- box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
+ def test_td64arr_div_int(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
- def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
- box = box_df_fail # DataFrame op returns m8[ns] instead of float64
+ def test_tdi_div_tdlike_scalar(self, delta, box):
+ # GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
@@ -1275,8 +1241,7 @@ def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
result = rng / delta
tm.assert_equal(result, expected)
- def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
- box = box_df_fail # DataFrame op returns m8[ns] instead of float64
+ def test_tdi_div_tdlike_scalar_with_nat(self, delta, box):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
@@ -1289,14 +1254,6 @@ def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Incorrectly returns "
- "m8[ns] instead of f8",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
@@ -1310,15 +1267,11 @@ def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
result = td1 // scalar_td
tm.assert_equal(result, expected)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
+ if box is pd.DataFrame and isinstance(scalar_td, np.timedelta64):
+ pytest.xfail(reason="raises TypeError, not sure why")
+
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
@@ -1330,14 +1283,6 @@ def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
result = scalar_td // td1
tm.assert_equal(result, expected)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
- "instead of f8",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
@@ -1353,15 +1298,13 @@ def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
- def test_td64arr_floordiv_int(self, box_df_fail):
- box = box_df_fail # DataFrame returns object dtype
+ def test_td64arr_floordiv_int(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
- def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
- box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
+ def test_td64arr_floordiv_tdlike_scalar(self, delta, box):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
@@ -1377,9 +1320,8 @@ def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
- def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
+ def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box):
# GH#19125
- box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
@@ -1398,15 +1340,6 @@ def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# ------------------------------------------------------------------
# Operations with invalid others
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="__mul__ op treats "
- "timedelta other as i8; "
- "rmul OK",
- strict=True))
- ], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
@@ -1439,13 +1372,6 @@ def test_td64arr_mul_td64arr_raises(self, box):
# ------------------------------------------------------------------
# Operations with numeric others
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object-dtype",
- strict=True))
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
@@ -1470,13 +1396,6 @@ def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
result = (2 * one) * tdser
tm.assert_equal(result, expected)
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object-dtype",
- strict=True))
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
@@ -1616,14 +1535,10 @@ def test_float_series_rdiv_td64arr(self, box, names):
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="raises ValueError "
- "instead of TypeError",
- strict=True))
- ])
+ @pytest.mark.parametrize('scalar_td', [
+ timedelta(minutes=5, seconds=4),
+ Timedelta('5m4s'),
+ Timedelta('5m4s').to_timedelta64()])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 2de7585ccedbd..468463d3eba5f 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -390,10 +390,10 @@ def test_bool_ops_raise_on_arithmetic(self):
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(False, df.a)
- with tm.assert_raises_regex(TypeError, err_msg):
+ with tm.assert_raises_regex(NotImplementedError, err_msg):
f(False, df)
- with tm.assert_raises_regex(TypeError, err_msg):
+ with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df, True)
def test_bool_ops_warn_on_arithmetic(self):
| Many issues closed; will track them down and update. Will also need whatsnew.
closes #18874
closes #20088
closes #15697
closes #13128
closes #8554
closes #8932
closes #21610
closes #22005
closes #22047
closes #22242
This will be less verbose after #22068 implements `ops.dispatch_to_series`.
This still only dispatches a subset of ops. #22019 dispatches another (disjoint) subset. After that is another easy-ish case where alignment is known. Saved for last are cases with ambiguous alignment that is currently done in an ad-hoc best-guess way. | https://api.github.com/repos/pandas-dev/pandas/pulls/22163 | 2018-08-02T01:36:24Z | 2018-08-14T10:49:12Z | 2018-08-14T10:49:10Z | 2020-04-05T17:40:29Z |
BUG: ensuring that np.asarray() simple handles data as objects and doesn't… | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 4bf62b021cddc..1757a73bf4473 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -134,7 +134,7 @@ def _ensure_data(values, dtype=None):
return values, dtype, 'int64'
# we have failed, return object
- values = np.asarray(values)
+ values = np.asarray(values, dtype=np.object)
return ensure_object(values), 'object', 'object'
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index f89c7545765c9..f118ee3ae0ed1 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -615,6 +615,68 @@ def test_categorical_from_codes(self):
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
+ def test_same_nan_is_in(self):
+ # GH 22160
+ # nan is special, because from " a is b" doesn't follow "a == b"
+ # at least, isin() should follow python's "np.nan in [nan] == True"
+ # casting to -> np.float64 -> another float-object somewher on
+ # the way could lead jepardize this behavior
+ comps = [np.nan] # could be casted to float64
+ values = [np.nan]
+ expected = np.array([True])
+ result = algos.isin(comps, values)
+ tm.assert_numpy_array_equal(expected, result)
+
+ def test_same_object_is_in(self):
+ # GH 22160
+ # there could be special treatment for nans
+ # the user however could define a custom class
+ # with similar behavior, then we at least should
+ # fall back to usual python's behavior: "a in [a] == True"
+ class LikeNan(object):
+ def __eq__(self):
+ return False
+
+ def __hash__(self):
+ return 0
+
+ a, b = LikeNan(), LikeNan()
+ # same object -> True
+ tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
+ # different objects -> False
+ tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))
+
+ def test_different_nans(self):
+ # GH 22160
+ # all nans are handled as equivalent
+
+ comps = [float('nan')]
+ values = [float('nan')]
+ assert comps[0] is not values[0] # different nan-objects
+
+ # as list of python-objects:
+ result = algos.isin(comps, values)
+ tm.assert_numpy_array_equal(np.array([True]), result)
+
+ # as object-array:
+ result = algos.isin(np.asarray(comps, dtype=np.object),
+ np.asarray(values, dtype=np.object))
+ tm.assert_numpy_array_equal(np.array([True]), result)
+
+ # as float64-array:
+ result = algos.isin(np.asarray(comps, dtype=np.float64),
+ np.asarray(values, dtype=np.float64))
+ tm.assert_numpy_array_equal(np.array([True]), result)
+
+ def test_no_cast(self):
+ # GH 22160
+ # ensure 42 is not casted to a string
+ comps = ['ss', 42]
+ values = ['42']
+ expected = np.array([False, False])
+ result = algos.isin(comps, values)
+ tm.assert_numpy_array_equal(expected, result)
+
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
| … try to do smart things (GH22160)
- [x] closes #22160
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22161 | 2018-08-01T15:36:00Z | 2018-08-10T10:38:15Z | 2018-08-10T10:38:15Z | 2018-08-11T19:45:01Z |
BUG: make conftest._cython_table deterministic for Python<3.6 | diff --git a/pandas/conftest.py b/pandas/conftest.py
index c714ce2228d09..94c07bc09267f 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -123,13 +123,10 @@ def all_arithmetic_operators(request):
return request.param
-# use sorted as dicts in py<3.6 have random order, which xdist doesn't like
-_cython_table = sorted(((key, value) for key, value in
- pd.core.base.SelectionMixin._cython_table.items()),
- key=lambda x: x[0].__name__)
+_cython_table = pd.core.base.SelectionMixin._cython_table.items()
-@pytest.fixture(params=_cython_table)
+@pytest.fixture(params=list(_cython_table))
def cython_table_items(request):
return request.param
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 5382315bad32b..084a976320d77 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -23,7 +23,7 @@
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
-from pandas.compat import PYPY
+from pandas.compat import PYPY, OrderedDict
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
@@ -179,28 +179,28 @@ class SelectionMixin(object):
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
- _builtin_table = {
- builtins.sum: np.sum,
- builtins.max: np.max,
- builtins.min: np.min
- }
- _cython_table = {
- builtins.sum: 'sum',
- builtins.max: 'max',
- builtins.min: 'min',
- np.all: 'all',
- np.any: 'any',
- np.sum: 'sum',
- np.mean: 'mean',
- np.prod: 'prod',
- np.std: 'std',
- np.var: 'var',
- np.median: 'median',
- np.max: 'max',
- np.min: 'min',
- np.cumprod: 'cumprod',
- np.cumsum: 'cumsum'
- }
+ _builtin_table = OrderedDict((
+ (builtins.sum, np.sum),
+ (builtins.max, np.max),
+ (builtins.min, np.min),
+ ))
+ _cython_table = OrderedDict((
+ (builtins.sum, 'sum'),
+ (builtins.max, 'max'),
+ (builtins.min, 'min'),
+ (np.all, 'all'),
+ (np.any, 'any'),
+ (np.sum, 'sum'),
+ (np.mean, 'mean'),
+ (np.prod, 'prod'),
+ (np.std, 'std'),
+ (np.var, 'var'),
+ (np.median, 'median'),
+ (np.max, 'max'),
+ (np.min, 'min'),
+ (np.cumprod, 'cumprod'),
+ (np.cumsum, 'cumsum'),
+ ))
@property
def _selection_name(self):
| Closes a bug for Python < 3.6 where xdist gets non-deterministic input because of random dict ordering for Python < 3.6.
See #22156 for details. | https://api.github.com/repos/pandas-dev/pandas/pulls/22157 | 2018-08-01T12:45:11Z | 2018-08-20T10:36:43Z | 2018-08-20T10:36:43Z | 2018-09-20T21:12:59Z |
Fix deprecation warning in SeriesGroupBy.count | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 4c87f6122b956..d0a9413f011ed 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -46,6 +46,7 @@
from pandas.core.index import Index, MultiIndex, CategoricalIndex
from pandas.core.arrays.categorical import Categorical
from pandas.core.internals import BlockManager, make_block
+from pandas.compat.numpy import _np_version_under1p13
from pandas.plotting._core import boxplot_frame_groupby
@@ -1207,7 +1208,8 @@ def count(self):
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
- out = np.bincount(ids[mask], minlength=ngroups or None)
+ minlength = ngroups or (None if _np_version_under1p13 else 0)
+ out = np.bincount(ids[mask], minlength=minlength)
return Series(out,
index=self.grouper.result_index,
| See https://github.com/pandas-dev/pandas/pull/21957#issuecomment-409527005 | https://api.github.com/repos/pandas-dev/pandas/pulls/22155 | 2018-08-01T10:42:11Z | 2018-08-01T21:49:58Z | 2018-08-01T21:49:58Z | 2018-09-20T21:13:30Z |
~Finish collecting m8[ns] tests, start collecting division by zero tests | diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 3d03a70553d2d..8d52bc9532136 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -89,116 +89,6 @@ def test_df_add_flex_filled_mixed_dtypes(self):
tm.assert_frame_equal(result, expected)
-class TestFrameMulDiv(object):
- """Tests for DataFrame multiplication and division"""
- # ------------------------------------------------------------------
- # Mod By Zero
-
- def test_df_mod_zero_df(self):
- # GH#3590, modulo as ints
- df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
-
- # this is technically wrong, as the integer portion is coerced to float
- # ###
- first = pd.Series([0, 0, 0, 0], dtype='float64')
- second = pd.Series([np.nan, np.nan, np.nan, 0])
- expected = pd.DataFrame({'first': first, 'second': second})
- result = df % df
- tm.assert_frame_equal(result, expected)
-
- def test_df_mod_zero_array(self):
- # GH#3590, modulo as ints
- df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
-
- # this is technically wrong, as the integer portion is coerced to float
- # ###
- first = pd.Series([0, 0, 0, 0], dtype='float64')
- second = pd.Series([np.nan, np.nan, np.nan, 0])
- expected = pd.DataFrame({'first': first, 'second': second})
-
- # numpy has a slightly different (wrong) treatment
- with np.errstate(all='ignore'):
- arr = df.values % df.values
- result2 = pd.DataFrame(arr, index=df.index,
- columns=df.columns, dtype='float64')
- result2.iloc[0:3, 1] = np.nan
- tm.assert_frame_equal(result2, expected)
-
- def test_df_mod_zero_int(self):
- # GH#3590, modulo as ints
- df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
-
- result = df % 0
- expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
- tm.assert_frame_equal(result, expected)
-
- # numpy has a slightly different (wrong) treatment
- with np.errstate(all='ignore'):
- arr = df.values.astype('float64') % 0
- result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
- tm.assert_frame_equal(result2, expected)
-
- def test_df_mod_zero_series_does_not_commute(self):
- # GH#3590, modulo as ints
- # not commutative with series
- df = pd.DataFrame(np.random.randn(10, 5))
- ser = df[0]
- res = ser % df
- res2 = df % ser
- assert not res.fillna(0).equals(res2.fillna(0))
-
- # ------------------------------------------------------------------
- # Division By Zero
-
- def test_df_div_zero_df(self):
- # integer div, but deal with the 0's (GH#9144)
- df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
- result = df / df
-
- first = pd.Series([1.0, 1.0, 1.0, 1.0])
- second = pd.Series([np.nan, np.nan, np.nan, 1])
- expected = pd.DataFrame({'first': first, 'second': second})
- tm.assert_frame_equal(result, expected)
-
- def test_df_div_zero_array(self):
- # integer div, but deal with the 0's (GH#9144)
- df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
-
- first = pd.Series([1.0, 1.0, 1.0, 1.0])
- second = pd.Series([np.nan, np.nan, np.nan, 1])
- expected = pd.DataFrame({'first': first, 'second': second})
-
- with np.errstate(all='ignore'):
- arr = df.values.astype('float') / df.values
- result = pd.DataFrame(arr, index=df.index,
- columns=df.columns)
- tm.assert_frame_equal(result, expected)
-
- def test_df_div_zero_int(self):
- # integer div, but deal with the 0's (GH#9144)
- df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
-
- result = df / 0
- expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
- expected.iloc[0:3, 1] = np.nan
- tm.assert_frame_equal(result, expected)
-
- # numpy has a slightly different (wrong) treatment
- with np.errstate(all='ignore'):
- arr = df.values.astype('float64') / 0
- result2 = pd.DataFrame(arr, index=df.index,
- columns=df.columns)
- tm.assert_frame_equal(result2, expected)
-
- def test_df_div_zero_series_does_not_commute(self):
- # integer div, but deal with the 0's (GH#9144)
- df = pd.DataFrame(np.random.randn(10, 5))
- ser = df[0]
- res = ser / df
- res2 = df / ser
- assert not res.fillna(0).equals(res2.fillna(0))
-
-
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 01ff038c4dd1c..32edb2591b735 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -163,48 +163,6 @@ def test_divmod_series(self):
for r, e in zip(result, expected):
tm.assert_series_equal(r, e)
- def test_div_zero(self, zero):
- idx = self.create_index()
-
- expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf],
- dtype=np.float64)
- result = idx / zero
- tm.assert_index_equal(result, expected)
- ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
- tm.assert_series_equal(ser_compat, Series(result))
-
- def test_floordiv_zero(self, zero):
- idx = self.create_index()
- expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf],
- dtype=np.float64)
-
- result = idx // zero
- tm.assert_index_equal(result, expected)
- ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
- tm.assert_series_equal(ser_compat, Series(result))
-
- def test_mod_zero(self, zero):
- idx = self.create_index()
-
- expected = Index([np.nan, np.nan, np.nan, np.nan, np.nan],
- dtype=np.float64)
- result = idx % zero
- tm.assert_index_equal(result, expected)
- ser_compat = Series(idx).astype('i8') % np.array(zero).astype('i8')
- tm.assert_series_equal(ser_compat, Series(result))
-
- def test_divmod_zero(self, zero):
- idx = self.create_index()
-
- exleft = Index([np.nan, np.inf, np.inf, np.inf, np.inf],
- dtype=np.float64)
- exright = Index([np.nan, np.nan, np.nan, np.nan, np.nan],
- dtype=np.float64)
-
- result = divmod(idx, zero)
- tm.assert_index_equal(result[0], exleft)
- tm.assert_index_equal(result[1], exright)
-
def test_explicit_conversions(self):
# GH 8608
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index a5e75de2a267e..e60705075267f 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -7,11 +7,11 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import (DatetimeIndex, TimedeltaIndex, Float64Index, Int64Index,
+from pandas import (DatetimeIndex, TimedeltaIndex, Int64Index,
to_timedelta, timedelta_range, date_range,
Series,
Timestamp, Timedelta)
-from pandas.errors import PerformanceWarning, NullFrequencyError
+from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
@@ -124,147 +124,6 @@ def test_comparisons_nat(self):
tm.assert_numpy_array_equal(result, expected)
-class TestTimedeltaIndexMultiplicationDivision(object):
- # __mul__, __rmul__,
- # __div__, __rdiv__, __floordiv__, __rfloordiv__,
- # __mod__, __rmod__, __divmod__, __rdivmod__
-
- # -------------------------------------------------------------
- # Multiplication
- # organized with scalar others first, then array-like
-
- def test_tdi_mul_int(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- result = idx * 1
- tm.assert_index_equal(result, idx)
-
- def test_tdi_rmul_int(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- result = 1 * idx
- tm.assert_index_equal(result, idx)
-
- def test_tdi_mul_tdlike_scalar_raises(self, delta):
- rng = timedelta_range('1 days', '10 days', name='foo')
- with pytest.raises(TypeError):
- rng * delta
-
- def test_tdi_mul_int_array_zerodim(self):
- rng5 = np.arange(5, dtype='int64')
- idx = TimedeltaIndex(rng5)
- expected = TimedeltaIndex(rng5 * 5)
- result = idx * np.array(5, dtype='int64')
- tm.assert_index_equal(result, expected)
-
- def test_tdi_mul_int_array(self):
- rng5 = np.arange(5, dtype='int64')
- idx = TimedeltaIndex(rng5)
- didx = TimedeltaIndex(rng5 ** 2)
-
- result = idx * rng5
- tm.assert_index_equal(result, didx)
-
- def test_tdi_mul_dti_raises(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- with pytest.raises(TypeError):
- idx * idx
-
- def test_tdi_mul_too_short_raises(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- with pytest.raises(TypeError):
- idx * TimedeltaIndex(np.arange(3))
- with pytest.raises(ValueError):
- idx * np.array([1, 2])
-
- def test_tdi_mul_int_series(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- didx = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
-
- result = idx * Series(np.arange(5, dtype='int64'))
-
- tm.assert_series_equal(result, Series(didx))
-
- def test_tdi_mul_float_series(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
-
- rng5f = np.arange(5, dtype='float64')
- result = idx * Series(rng5f + 0.1)
- expected = Series(TimedeltaIndex(rng5f * (rng5f + 0.1)))
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize('other', [np.arange(1, 11),
- pd.Int64Index(range(1, 11)),
- pd.UInt64Index(range(1, 11)),
- pd.Float64Index(range(1, 11)),
- pd.RangeIndex(1, 11)])
- def test_tdi_rmul_arraylike(self, other):
- tdi = TimedeltaIndex(['1 Day'] * 10)
- expected = timedelta_range('1 days', '10 days')
-
- result = other * tdi
- tm.assert_index_equal(result, expected)
- commute = tdi * other
- tm.assert_index_equal(commute, expected)
-
- # -------------------------------------------------------------
- # TimedeltaIndex.__div__
-
- def test_tdi_div_int(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- result = idx / 1
- tm.assert_index_equal(result, idx)
-
- def test_tdi_div_tdlike_scalar(self, delta):
- rng = timedelta_range('1 days', '10 days', name='foo')
- expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
-
- result = rng / delta
- tm.assert_index_equal(result, expected, exact=False)
-
- def test_tdi_div_tdlike_scalar_with_nat(self, delta):
- rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
- expected = Float64Index([12, np.nan, 24], name='foo')
- result = rng / delta
- tm.assert_index_equal(result, expected)
-
- def test_tdi_div_nat_raises(self):
- # don't allow division by NaT (make could in the future)
- rng = timedelta_range('1 days', '10 days', name='foo')
- with pytest.raises(TypeError):
- rng / pd.NaT
-
- # -------------------------------------------------------------
- # TimedeltaIndex.__floordiv__
-
- def test_tdi_floordiv_int(self):
- idx = TimedeltaIndex(np.arange(5, dtype='int64'))
- result = idx // 1
- tm.assert_index_equal(result, idx)
-
- def test_tdi_floordiv_tdlike_scalar(self, delta):
- tdi = timedelta_range('1 days', '10 days', name='foo')
- expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
-
- result = tdi // delta
- tm.assert_index_equal(result, expected, exact=False)
-
- @pytest.mark.parametrize('scalar_td', [
- timedelta(minutes=10, seconds=7),
- Timedelta('10m7s'),
- Timedelta('10m7s').to_timedelta64()])
- def test_tdi_floordiv_timedelta_scalar(self, scalar_td):
- # GH#19125
- tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
- expected = pd.Index([2.0, 2.0, np.nan])
-
- res = tdi.__rfloordiv__(scalar_td)
- tm.assert_index_equal(res, expected)
-
- expected = pd.Index([0.0, 0.0, np.nan])
-
- res = tdi // (scalar_td)
- tm.assert_index_equal(res, expected)
-
-
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
@@ -324,122 +183,6 @@ def test_shift_no_freq(self):
# -------------------------------------------------------------
- @pytest.mark.parametrize('names', [(None, None, None),
- ('foo', 'bar', None),
- ('foo', 'foo', 'foo')])
- def test_tdi_add_offset_index(self, names):
- # GH#18849, GH#19744
- tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
- name=names[0])
- other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
- name=names[1])
-
- expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
- freq='infer', name=names[2])
-
- with tm.assert_produces_warning(PerformanceWarning):
- res = tdi + other
- tm.assert_index_equal(res, expected)
-
- with tm.assert_produces_warning(PerformanceWarning):
- res2 = other + tdi
- tm.assert_index_equal(res2, expected)
-
- def test_tdi_add_offset_array(self):
- # GH#18849
- tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
- other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
-
- expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
- freq='infer')
-
- with tm.assert_produces_warning(PerformanceWarning):
- res = tdi + other
- tm.assert_index_equal(res, expected)
-
- with tm.assert_produces_warning(PerformanceWarning):
- res2 = other + tdi
- tm.assert_index_equal(res2, expected)
-
- @pytest.mark.parametrize('names', [(None, None, None),
- ('foo', 'bar', None),
- ('foo', 'foo', 'foo')])
- def test_tdi_sub_offset_index(self, names):
- # GH#18824, GH#19744
- tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
- name=names[0])
- other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
- name=names[1])
-
- expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
- freq='infer', name=names[2])
-
- with tm.assert_produces_warning(PerformanceWarning):
- res = tdi - other
- tm.assert_index_equal(res, expected)
-
- def test_tdi_sub_offset_array(self):
- # GH#18824
- tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
- other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
-
- expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
- freq='infer')
-
- with tm.assert_produces_warning(PerformanceWarning):
- res = tdi - other
- tm.assert_index_equal(res, expected)
-
- @pytest.mark.parametrize('names', [(None, None, None),
- ('foo', 'bar', None),
- ('foo', 'foo', 'foo')])
- def test_tdi_with_offset_series(self, names):
- # GH#18849
- tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
- name=names[0])
- other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
- name=names[1])
-
- expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
- name=names[2])
-
- with tm.assert_produces_warning(PerformanceWarning):
- res = tdi + other
- tm.assert_series_equal(res, expected_add)
-
- with tm.assert_produces_warning(PerformanceWarning):
- res2 = other + tdi
- tm.assert_series_equal(res2, expected_add)
-
- expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
- name=names[2])
-
- with tm.assert_produces_warning(PerformanceWarning):
- res3 = tdi - other
- tm.assert_series_equal(res3, expected_sub)
-
- @pytest.mark.parametrize('box', [np.array, pd.Index, pd.Series])
- def test_tdi_add_sub_anchored_offset_arraylike(self, box):
- # GH#18824
- tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
-
- anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
-
- # addition/subtraction ops with anchored offsets should issue
- # a PerformanceWarning and _then_ raise a TypeError.
- with pytest.raises(TypeError):
- with tm.assert_produces_warning(PerformanceWarning):
- tdi + anchored
- with pytest.raises(TypeError):
- with tm.assert_produces_warning(PerformanceWarning):
- anchored + tdi
- with pytest.raises(TypeError):
- with tm.assert_produces_warning(PerformanceWarning):
- tdi - anchored
- with pytest.raises(TypeError):
- with tm.assert_produces_warning(PerformanceWarning):
- anchored - tdi
-
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
@@ -546,14 +289,7 @@ def test_tdi_addsub_integer_array_no_freq(self, box):
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
-
- def test_tdi_add_timedeltalike(self, delta):
- # only test adding/sub offsets as + is now numeric
- rng = timedelta_range('1 days', '10 days')
- result = rng + delta
- expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
- freq='D')
- tm.assert_index_equal(result, expected)
+ # Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
@@ -563,13 +299,6 @@ def test_tdi_iadd_timedeltalike(self, delta):
rng += delta
tm.assert_index_equal(rng, expected)
- def test_tdi_sub_timedeltalike(self, delta):
- # only test adding/sub offsets as - is now numeric
- rng = timedelta_range('1 days', '10 days')
- result = rng - delta
- expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
- tm.assert_index_equal(result, expected)
-
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
@@ -577,55 +306,6 @@ def test_tdi_isub_timedeltalike(self, delta):
rng -= delta
tm.assert_index_equal(rng, expected)
- # -------------------------------------------------------------
- # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
-
- def test_tdi_sub_dt64_array(self):
- dti = pd.date_range('2016-01-01', periods=3)
- tdi = dti - dti.shift(1)
- dtarr = dti.values
-
- with pytest.raises(TypeError):
- tdi - dtarr
-
- # TimedeltaIndex.__rsub__
- expected = pd.DatetimeIndex(dtarr) - tdi
- result = dtarr - tdi
- tm.assert_index_equal(result, expected)
-
- def test_tdi_add_dt64_array(self):
- dti = pd.date_range('2016-01-01', periods=3)
- tdi = dti - dti.shift(1)
- dtarr = dti.values
-
- expected = pd.DatetimeIndex(dtarr) + tdi
- result = tdi + dtarr
- tm.assert_index_equal(result, expected)
- result = dtarr + tdi
- tm.assert_index_equal(result, expected)
-
- def test_tdi_add_td64_array(self):
- dti = pd.date_range('2016-01-01', periods=3)
- tdi = dti - dti.shift(1)
- tdarr = tdi.values
-
- expected = 2 * tdi
- result = tdi + tdarr
- tm.assert_index_equal(result, expected)
- result = tdarr + tdi
- tm.assert_index_equal(result, expected)
-
- def test_tdi_sub_td64_array(self):
- dti = pd.date_range('2016-01-01', periods=3)
- tdi = dti - dti.shift(1)
- tdarr = tdi.values
-
- expected = 0 * tdi
- result = tdi - tdarr
- tm.assert_index_equal(result, expected)
- result = tdarr - tdi
- tm.assert_index_equal(result, expected)
-
# -------------------------------------------------------------
def test_subtraction_ops(self):
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index c091df63fcfc7..1413e01000ec5 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -355,64 +355,6 @@ def test_div_equiv_binop(self):
result = second / first
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize('dtype2', [
- np.int64, np.int32, np.int16, np.int8,
- np.float64, np.float32, np.float16,
- np.uint64, np.uint32, np.uint16, np.uint8])
- @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
- def test_ser_div_ser(self, dtype1, dtype2):
- # no longer do integer div for any ops, but deal with the 0's
- first = Series([3, 4, 5, 8], name='first').astype(dtype1)
- second = Series([0, 0, 0, 3], name='second').astype(dtype2)
-
- with np.errstate(all='ignore'):
- expected = Series(first.values.astype(np.float64) / second.values,
- dtype='float64', name=None)
- expected.iloc[0:3] = np.inf
-
- result = first / second
- tm.assert_series_equal(result, expected)
- assert not result.equals(second / first)
-
- def test_rdiv_zero_compat(self):
- # GH#8674
- zero_array = np.array([0] * 5)
- data = np.random.randn(5)
- expected = Series([0.] * 5)
-
- result = zero_array / Series(data)
- tm.assert_series_equal(result, expected)
-
- result = Series(zero_array) / data
- tm.assert_series_equal(result, expected)
-
- result = Series(zero_array) / Series(data)
- tm.assert_series_equal(result, expected)
-
- def test_div_zero_inf_signs(self):
- # GH#9144, inf signing
- ser = Series([-1, 0, 1], name='first')
- expected = Series([-np.inf, np.nan, np.inf], name='first')
-
- result = ser / 0
- tm.assert_series_equal(result, expected)
-
- def test_rdiv_zero(self):
- # GH#9144
- ser = Series([-1, 0, 1], name='first')
- expected = Series([0.0, np.nan, 0.0], name='first')
-
- result = 0 / ser
- tm.assert_series_equal(result, expected)
-
- def test_floordiv_div(self):
- # GH#9144
- ser = Series([-1, 0, 1], name='first')
-
- result = ser // 0
- expected = Series([-np.inf, np.nan, np.inf], name='first')
- tm.assert_series_equal(result, expected)
-
class TestSeriesArithmetic(object):
# Standard, numeric, or otherwise not-Timestamp/Timedelta/Period dtypes
diff --git a/pandas/tests/test_arithmetic.py b/pandas/tests/test_arithmetic.py
index 8ee0bf9ec874a..0d62a5db581fe 100644
--- a/pandas/tests/test_arithmetic.py
+++ b/pandas/tests/test_arithmetic.py
@@ -10,10 +10,12 @@
import pandas as pd
import pandas.util.testing as tm
+from pandas.compat import long
from pandas.core import ops
-from pandas.errors import NullFrequencyError
+from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
+ timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
@@ -28,6 +30,49 @@ def tdser():
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
+@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
+ np.timedelta64(2, 'h'), Timedelta(hours=2)],
+ ids=lambda x: type(x).__name__)
+def delta(request):
+ """
+ Several ways of representing two hours
+ """
+ return request.param
+
+
+@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
+ Timedelta('5m4s'),
+ Timedelta('5m4s').to_timedelta64()],
+ ids=lambda x: type(x).__name__)
+def scalar_td(request):
+ """
+ Several variants of Timedelta scalars representing 5 minutes and 4 seconds
+ """
+ return request.param
+
+
+@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
+ ids=lambda x: x.__name__)
+def box(request):
+ """
+ Several array-like containers that should have effectively identical
+ behavior with respect to arithmetic operations.
+ """
+ return request.param
+
+
+@pytest.fixture(params=[pd.Index,
+ Series,
+ pytest.param(pd.DataFrame,
+ marks=pytest.mark.xfail(strict=True))],
+ ids=lambda x: x.__name__)
+def box_df_fail(request):
+ """
+ Fixture equivalent to `box` fixture but xfailing the DataFrame case.
+ """
+ return request.param
+
+
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
@@ -58,7 +103,7 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
- expected = pd.timedelta_range('1 days', '10 days')
+ expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
@@ -69,7 +114,6 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
commute = scalar_td * index
tm.assert_equal(commute, expected)
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
@@ -109,8 +153,6 @@ class TestTimedeltaArraylikeAddSubOps(object):
# -------------------------------------------------------------
# Invalid Operations
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
- ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
@@ -121,8 +163,6 @@ def test_td64arr_add_str_invalid(self, box):
with pytest.raises(TypeError):
'a' + tdi
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
- ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
@@ -186,8 +226,6 @@ def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
- ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
@@ -231,6 +269,73 @@ def test_td64_radd_timestamp(self, box):
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
+ @pytest.mark.parametrize('box', [
+ pd.Index,
+ Series,
+ pytest.param(pd.DataFrame,
+ marks=pytest.mark.xfail(reason="Returns object dtype "
+ "instead of "
+ "datetime64[ns]",
+ strict=True))
+ ], ids=lambda x: x.__name__)
+ def test_td64arr_add_sub_timestamp(self, box):
+ # GH#11925
+ ts = Timestamp('2012-01-01')
+ # TODO: parametrize over types of datetime scalar?
+
+ tdser = Series(timedelta_range('1 day', periods=3))
+ expected = Series(pd.date_range('2012-01-02', periods=3))
+
+ tdser = tm.box_expected(tdser, box)
+ expected = tm.box_expected(expected, box)
+
+ tm.assert_equal(ts + tdser, expected)
+ tm.assert_equal(tdser + ts, expected)
+
+ expected2 = Series(pd.date_range('2011-12-31',
+ periods=3, freq='-1D'))
+ expected2 = tm.box_expected(expected2, box)
+
+ tm.assert_equal(ts - tdser, expected2)
+ tm.assert_equal(ts + (-tdser), expected2)
+
+ with pytest.raises(TypeError):
+ tdser - ts
+
+ def test_tdi_sub_dt64_array(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
+
+ dti = pd.date_range('2016-01-01', periods=3)
+ tdi = dti - dti.shift(1)
+ dtarr = dti.values
+ expected = pd.DatetimeIndex(dtarr) - tdi
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ with pytest.raises(TypeError):
+ tdi - dtarr
+
+ # TimedeltaIndex.__rsub__
+ result = dtarr - tdi
+ tm.assert_equal(result, expected)
+
+ def test_tdi_add_dt64_array(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
+
+ dti = pd.date_range('2016-01-01', periods=3)
+ tdi = dti - dti.shift(1)
+ dtarr = dti.values
+ expected = pd.DatetimeIndex(dtarr) + tdi
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ result = tdi + dtarr
+ tm.assert_equal(result, expected)
+ result = dtarr + tdi
+ tm.assert_equal(result, expected)
+
# ------------------------------------------------------------------
# Operations with int-like others
@@ -280,8 +385,6 @@ def test_td64arr_sub_int_series_invalid(self, box, tdser):
with pytest.raises(err):
tdser - Series([2, 3, 4])
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
- ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
@@ -293,7 +396,7 @@ def test_td64arr_rsub_int_series_invalid(self, box, tdser):
pd.Index,
Series,
pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Tries to broadcast "
+ marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@@ -326,8 +429,6 @@ def test_td64arr_add_intlike(self, box):
with pytest.raises(err):
ser - pd.Index(other)
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
- ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
@@ -387,43 +488,39 @@ def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
vector - tdser
# ------------------------------------------------------------------
- # Operations with datetime-like others
+ # Operations with timedelta-like others
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Returns object dtype "
- "instead of "
- "datetime64[ns]",
- strict=True))
- ], ids=lambda x: x.__name__)
- def test_td64arr_add_sub_timestamp(self, box):
- # GH#11925
- ts = Timestamp('2012-01-01')
- # TODO: parametrize over types of datetime scalar?
+ def test_td64arr_add_td64_array(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
- tdser = Series(pd.timedelta_range('1 day', periods=3))
- expected = Series(pd.date_range('2012-01-02', periods=3))
+ dti = pd.date_range('2016-01-01', periods=3)
+ tdi = dti - dti.shift(1)
+ tdarr = tdi.values
- tdser = tm.box_expected(tdser, box)
+ expected = 2 * tdi
+ tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
- tm.assert_equal(ts + tdser, expected)
- tm.assert_equal(tdser + ts, expected)
+ result = tdi + tdarr
+ tm.assert_equal(result, expected)
+ result = tdarr + tdi
+ tm.assert_equal(result, expected)
- expected2 = Series(pd.date_range('2011-12-31',
- periods=3, freq='-1D'))
- expected2 = tm.box_expected(expected2, box)
+ def test_td64arr_sub_td64_array(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
- tm.assert_equal(ts - tdser, expected2)
- tm.assert_equal(ts + (-tdser), expected2)
+ dti = pd.date_range('2016-01-01', periods=3)
+ tdi = dti - dti.shift(1)
+ tdarr = tdi.values
- with pytest.raises(TypeError):
- tdser - ts
+ expected = 0 * tdi
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
- # ------------------------------------------------------------------
- # Operations with timedelta-like others (including DateOffsets)
+ result = tdi - tdarr
+ tm.assert_equal(result, expected)
+ result = tdarr - tdi
+ tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
@@ -481,8 +578,6 @@ def test_td64arr_add_sub_tdi(self, box, names):
else:
assert result.dtypes[0] == 'timedelta64[ns]'
- @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
- ids=lambda x: x.__name__)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
@@ -491,14 +586,348 @@ def test_td64arr_sub_NaT(self, box):
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
- res = ser - NaT
+ res = ser - pd.NaT
+ tm.assert_equal(res, expected)
+
+ def test_td64arr_add_timedeltalike(self, delta, box):
+ # only test adding/sub offsets as + is now numeric
+ if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
+ pytest.xfail(reason="Returns object dtype instead of m8[ns]")
+
+ rng = timedelta_range('1 days', '10 days')
+ expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
+ freq='D')
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, box)
+
+ result = rng + delta
+ tm.assert_equal(result, expected)
+
+ def test_td64arr_sub_timedeltalike(self, delta, box):
+ # only test adding/sub offsets as - is now numeric
+ if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
+ pytest.xfail(reason="Returns object dtype instead of m8[ns]")
+
+ rng = timedelta_range('1 days', '10 days')
+ expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
+
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, box)
+
+ result = rng - delta
+ tm.assert_equal(result, expected)
+
+ # ------------------------------------------------------------------
+ # __add__/__sub__ with DateOffsets and arrays of DateOffsets
+
+ @pytest.mark.parametrize('box', [
+ pd.Index,
+ pytest.param(Series,
+ marks=pytest.mark.xfail(reason="Index fails to return "
+ "NotImplemented on "
+ "reverse op",
+ strict=True)),
+ pytest.param(pd.DataFrame,
+ marks=pytest.mark.xfail(reason="Tries to broadcast "
+ "incorrectly",
+ strict=True, raises=ValueError))
+ ], ids=lambda x: x.__name__)
+ @pytest.mark.parametrize('names', [(None, None, None),
+ ('foo', 'bar', None),
+ ('foo', 'foo', 'foo')])
+ def test_td64arr_add_offset_index(self, names, box):
+ # GH#18849, GH#19744
+ tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
+ name=names[0])
+ other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
+ name=names[1])
+
+ expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
+ freq='infer', name=names[2])
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res = tdi + other
+ tm.assert_equal(res, expected)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res2 = other + tdi
+ tm.assert_equal(res2, expected)
+
+ # TODO: combine with test_td64arr_add_offset_index by parametrizing
+ # over second box?
+ def test_td64arr_add_offset_array(self, box_df_fail):
+ # GH#18849
+ box = box_df_fail # tries to broadcast incorrectly
+ tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
+ other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
+
+ expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
+ freq='infer')
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res = tdi + other
tm.assert_equal(res, expected)
+ with tm.assert_produces_warning(PerformanceWarning):
+ res2 = other + tdi
+ tm.assert_equal(res2, expected)
+
+ @pytest.mark.parametrize('names', [(None, None, None),
+ ('foo', 'bar', None),
+ ('foo', 'foo', 'foo')])
+ def test_td64arr_sub_offset_index(self, names, box_df_fail):
+ # GH#18824, GH#19744
+ box = box_df_fail # tries to broadcast incorrectly
+ tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
+ name=names[0])
+ other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
+ name=names[1])
+
+ expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
+ freq='infer', name=names[2])
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res = tdi - other
+ tm.assert_equal(res, expected)
+
+ def test_td64arr_sub_offset_array(self, box_df_fail):
+ # GH#18824
+ box = box_df_fail # tries to broadcast incorrectly
+ tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
+ other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
+
+ expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
+ freq='infer')
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res = tdi - other
+ tm.assert_equal(res, expected)
+
+ @pytest.mark.parametrize('box', [
+ pd.Index,
+ pytest.param(Series,
+ marks=pytest.mark.xfail(reason="object dtype Series "
+ "fails to return "
+ "NotImplemented",
+ strict=True, raises=TypeError)),
+ pytest.param(pd.DataFrame,
+ marks=pytest.mark.xfail(reason="tries to broadcast "
+ "incorrectly",
+ strict=True, raises=ValueError))
+ ], ids=lambda x: x.__name__)
+ @pytest.mark.parametrize('names', [(None, None, None),
+ ('foo', 'bar', None),
+ ('foo', 'foo', 'foo')])
+ def test_td64arr_with_offset_series(self, names, box):
+ # GH#18849
+ box2 = Series if box is pd.Index else box
+
+ tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
+ name=names[0])
+ other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
+ name=names[1])
+
+ expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
+ name=names[2])
+ tdi = tm.box_expected(tdi, box)
+ expected_add = tm.box_expected(expected_add, box2)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res = tdi + other
+ tm.assert_equal(res, expected_add)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res2 = other + tdi
+ tm.assert_equal(res2, expected_add)
+
+ # TODO: separate/parametrize add/sub test?
+ expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
+ name=names[2])
+ expected_sub = tm.box_expected(expected_sub, box2)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ res3 = tdi - other
+ tm.assert_equal(res3, expected_sub)
+
+ @pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
+ def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
+ # GH#18824
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
+ tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
+ tdi = tm.box_expected(tdi, box)
+
+ anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
+
+ # addition/subtraction ops with anchored offsets should issue
+ # a PerformanceWarning and _then_ raise a TypeError.
+ with pytest.raises(TypeError):
+ with tm.assert_produces_warning(PerformanceWarning):
+ tdi + anchored
+ with pytest.raises(TypeError):
+ with tm.assert_produces_warning(PerformanceWarning):
+ anchored + tdi
+ with pytest.raises(TypeError):
+ with tm.assert_produces_warning(PerformanceWarning):
+ tdi - anchored
+ with pytest.raises(TypeError):
+ with tm.assert_produces_warning(PerformanceWarning):
+ anchored - tdi
+
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
+ # ------------------------------------------------------------------
+ # Multiplication
+ # organized with scalar others first, then array-like
+
+ def test_td64arr_mul_int(self, box_df_fail):
+ box = box_df_fail # DataFrame op returns object instead of m8[ns]
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ idx = tm.box_expected(idx, box)
+
+ result = idx * 1
+ tm.assert_equal(result, idx)
+
+ result = 1 * idx
+ tm.assert_equal(result, idx)
+
+ def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
+ if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
+ pytest.xfail(reason="returns m8[ns] instead of raising")
+
+ rng = timedelta_range('1 days', '10 days', name='foo')
+ rng = tm.box_expected(rng, box)
+ with pytest.raises(TypeError):
+ rng * delta
+
+ def test_tdi_mul_int_array_zerodim(self, box_df_fail):
+ box = box_df_fail # DataFrame op returns object dtype
+ rng5 = np.arange(5, dtype='int64')
+ idx = TimedeltaIndex(rng5)
+ expected = TimedeltaIndex(rng5 * 5)
+
+ idx = tm.box_expected(idx, box)
+ expected = tm.box_expected(expected, box)
+
+ result = idx * np.array(5, dtype='int64')
+ tm.assert_equal(result, expected)
+
+ def test_tdi_mul_int_array(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
+ rng5 = np.arange(5, dtype='int64')
+ idx = TimedeltaIndex(rng5)
+ expected = TimedeltaIndex(rng5 ** 2)
+
+ idx = tm.box_expected(idx, box)
+ expected = tm.box_expected(expected, box)
+
+ result = idx * rng5
+ tm.assert_equal(result, expected)
+
+ def test_tdi_mul_int_series(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
+
+ idx = tm.box_expected(idx, box)
+
+ box2 = pd.Series if box is pd.Index else box
+ expected = tm.box_expected(expected, box2)
+
+ result = idx * pd.Series(np.arange(5, dtype='int64'))
+ tm.assert_equal(result, expected)
+
+ def test_tdi_mul_float_series(self, box_df_fail):
+ box = box_df_fail # DataFrame tries to broadcast incorrectly
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ idx = tm.box_expected(idx, box)
+
+ rng5f = np.arange(5, dtype='float64')
+ expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
+ box2 = pd.Series if box is pd.Index else box
+ expected = tm.box_expected(expected, box2)
+
+ result = idx * Series(rng5f + 0.1)
+ tm.assert_equal(result, expected)
+
+ # TODO: Put Series/DataFrame in others?
+ @pytest.mark.parametrize('other', [
+ np.arange(1, 11),
+ pd.Int64Index(range(1, 11)),
+ pd.UInt64Index(range(1, 11)),
+ pd.Float64Index(range(1, 11)),
+ pd.RangeIndex(1, 11)
+ ], ids=lambda x: type(x).__name__)
+ def test_tdi_rmul_arraylike(self, other, box_df_fail):
+ # RangeIndex fails to return NotImplemented, for others
+ # DataFrame tries to broadcast incorrectly
+ box = box_df_fail
+
+ tdi = TimedeltaIndex(['1 Day'] * 10)
+ expected = timedelta_range('1 days', '10 days')
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ result = other * tdi
+ tm.assert_equal(result, expected)
+ commute = tdi * other
+ tm.assert_equal(commute, expected)
+
+ # ------------------------------------------------------------------
+ # __div__
+
+ def test_td64arr_div_nat_invalid(self, box_df_fail):
+ # don't allow division by NaT (maybe could in the future)
+ box = box_df_fail # DataFrame returns all-NaT instead of raising
+ rng = timedelta_range('1 days', '10 days', name='foo')
+ rng = tm.box_expected(rng, box)
+ with pytest.raises(TypeError):
+ rng / pd.NaT
+
+ def test_td64arr_div_int(self, box_df_fail):
+ box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ idx = tm.box_expected(idx, box)
+
+ result = idx / 1
+ tm.assert_equal(result, idx)
+
+ def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
+ box = box_df_fail # DataFrame op returns m8[ns] instead of float64
+ rng = timedelta_range('1 days', '10 days', name='foo')
+ expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
+
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, box)
+
+ result = rng / delta
+ tm.assert_equal(result, expected)
+
+ def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
+ box = box_df_fail # DataFrame op returns m8[ns] instead of float64
+ rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
+ expected = pd.Float64Index([12, np.nan, 24], name='foo')
+
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, box)
+
+ result = rng / delta
+ tm.assert_equal(result, expected)
+
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@@ -510,10 +939,6 @@ class TestTimedeltaArraylikeMulDivOps(object):
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
- @pytest.mark.parametrize('scalar_td', [
- timedelta(minutes=5, seconds=4),
- Timedelta('5m4s'),
- Timedelta('5m4s').to_timedelta64()])
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
@@ -534,10 +959,6 @@ def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
- @pytest.mark.parametrize('scalar_td', [
- timedelta(minutes=5, seconds=4),
- Timedelta('5m4s'),
- Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
@@ -559,10 +980,6 @@ def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
- @pytest.mark.parametrize('scalar_td', [
- timedelta(minutes=5, seconds=4),
- Timedelta('5m4s'),
- Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
@@ -578,8 +995,50 @@ def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
+ def test_td64arr_floordiv_int(self, box_df_fail):
+ box = box_df_fail # DataFrame returns object dtype
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ idx = tm.box_expected(idx, box)
+ result = idx // 1
+ tm.assert_equal(result, idx)
+
+ def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
+ box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
+ tdi = timedelta_range('1 days', '10 days', name='foo')
+ expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ result = tdi // delta
+ tm.assert_equal(result, expected)
+
+ # TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
+ @pytest.mark.parametrize('scalar_td', [
+ timedelta(minutes=10, seconds=7),
+ Timedelta('10m7s'),
+ Timedelta('10m7s').to_timedelta64()
+ ], ids=lambda x: type(x).__name__)
+ def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
+ # GH#19125
+ box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
+ tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
+ expected = pd.Index([2.0, 2.0, np.nan])
+
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, box)
+
+ res = tdi.__rfloordiv__(scalar_td)
+ tm.assert_equal(res, expected)
+
+ expected = pd.Index([0.0, 0.0, np.nan])
+ expected = tm.box_expected(expected, box)
+
+ res = tdi // (scalar_td)
+ tm.assert_equal(res, expected)
+
# ------------------------------------------------------------------
- # Operations with timedelta-like others
+ # Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
@@ -590,10 +1049,6 @@ def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
- @pytest.mark.parametrize('scalar_td', [
- timedelta(minutes=5, seconds=4),
- Timedelta('5m4s'),
- Timedelta('5m4s').to_timedelta64()])
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
@@ -609,6 +1064,20 @@ def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
+ def test_td64arr_mul_too_short_raises(self, box):
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ idx = tm.box_expected(idx, box)
+ with pytest.raises(TypeError):
+ idx * idx[:3]
+ with pytest.raises(ValueError):
+ idx * np.array([1, 2])
+
+ def test_td64arr_mul_td64arr_raises(self, box):
+ idx = TimedeltaIndex(np.arange(5, dtype='int64'))
+ idx = tm.box_expected(idx, box)
+ with pytest.raises(TypeError):
+ idx * idx
+
# ------------------------------------------------------------------
# Operations with numeric others
@@ -675,8 +1144,10 @@ def test_td64arr_div_numeric_scalar(self, box, two, tdser):
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
- Series([20, 30, 40])])
- def test_td64arr_mul_numeric_array(self, box, vector, dtype, tdser):
+ Series([20, 30, 40])],
+ ids=lambda x: type(x).__name__)
+ @pytest.mark.parametrize('op', [operator.mul, ops.rmul])
+ def test_td64arr_rmul_numeric_array(self, op, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
@@ -689,7 +1160,7 @@ def test_td64arr_mul_numeric_array(self, box, vector, dtype, tdser):
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
- result = tdser * vector
+ result = op(vector, tdser)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
@@ -707,35 +1178,6 @@ def test_td64arr_mul_numeric_array(self, box, vector, dtype, tdser):
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
- def test_td64arr_rmul_numeric_array(self, box, vector, dtype, tdser):
- # GH#4521
- # divide/multiply by integers
- vector = vector.astype(dtype)
-
- expected = Series(['1180 Days', '1770 Days', 'NaT'],
- dtype='timedelta64[ns]')
-
- tdser = tm.box_expected(tdser, box)
- box = Series if (box is pd.Index and type(vector) is Series) else box
- expected = tm.box_expected(expected, box)
-
- result = vector * tdser
- tm.assert_equal(result, expected)
-
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="broadcasts along "
- "wrong axis",
- strict=True))
- ], ids=lambda x: x.__name__)
- @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
- 'uint64', 'uint32', 'uint16', 'uint8',
- 'float64', 'float32', 'float16'])
- @pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
- pd.Index([20, 30, 40]),
- Series([20, 30, 40])])
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
@@ -787,11 +1229,6 @@ def test_td64arr_mul_int_series(self, box, names):
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pd.DataFrame
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
@@ -829,10 +1266,6 @@ class TestTimedeltaArraylikeInvalidArithmeticOps(object):
"instead of TypeError",
strict=True))
])
- @pytest.mark.parametrize('scalar_td', [
- timedelta(minutes=5, seconds=4),
- Timedelta('5m4s'),
- Timedelta('5m4s').to_timedelta64()])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
@@ -848,3 +1281,231 @@ def test_td64arr_pow_invalid(self, scalar_td, box):
with tm.assert_raises_regex(TypeError, pattern):
td1 ** scalar_td
+
+
+# ------------------------------------------------------------------
+
+@pytest.fixture(params=[pd.Float64Index(np.arange(5, dtype='float64')),
+ pd.Int64Index(np.arange(5, dtype='int64')),
+ pd.UInt64Index(np.arange(5, dtype='uint64'))],
+ ids=lambda x: type(x).__name__)
+def idx(request):
+ return request.param
+
+
+zeros = [box_cls([0] * 5, dtype=dtype)
+ for box_cls in [pd.Index, np.array]
+ for dtype in [np.int64, np.uint64, np.float64]]
+zeros.extend([np.array(0, dtype=dtype)
+ for dtype in [np.int64, np.uint64, np.float64]])
+zeros.extend([0, 0.0, long(0)])
+
+
+@pytest.fixture(params=zeros)
+def zero(request):
+ # For testing division by (or of) zero for Index with length 5, this
+ # gives several scalar-zeros and length-5 vector-zeros
+ return request.param
+
+
+class TestDivisionByZero(object):
+
+ def test_div_zero(self, zero, idx):
+ expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
+ dtype=np.float64)
+ result = idx / zero
+ tm.assert_index_equal(result, expected)
+ ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
+ tm.assert_series_equal(ser_compat, Series(result))
+
+ def test_floordiv_zero(self, zero, idx):
+ expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
+ dtype=np.float64)
+
+ result = idx // zero
+ tm.assert_index_equal(result, expected)
+ ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
+ tm.assert_series_equal(ser_compat, Series(result))
+
+ def test_mod_zero(self, zero, idx):
+ expected = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
+ dtype=np.float64)
+ result = idx % zero
+ tm.assert_index_equal(result, expected)
+ ser_compat = Series(idx).astype('i8') % np.array(zero).astype('i8')
+ tm.assert_series_equal(ser_compat, Series(result))
+
+ def test_divmod_zero(self, zero, idx):
+
+ exleft = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
+ dtype=np.float64)
+ exright = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
+ dtype=np.float64)
+
+ result = divmod(idx, zero)
+ tm.assert_index_equal(result[0], exleft)
+ tm.assert_index_equal(result[1], exright)
+
+ # ------------------------------------------------------------------
+
+ @pytest.mark.parametrize('dtype2', [
+ np.int64, np.int32, np.int16, np.int8,
+ np.float64, np.float32, np.float16,
+ np.uint64, np.uint32, np.uint16, np.uint8])
+ @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
+ def test_ser_div_ser(self, dtype1, dtype2):
+ # no longer do integer div for any ops, but deal with the 0's
+ first = Series([3, 4, 5, 8], name='first').astype(dtype1)
+ second = Series([0, 0, 0, 3], name='second').astype(dtype2)
+
+ with np.errstate(all='ignore'):
+ expected = Series(first.values.astype(np.float64) / second.values,
+ dtype='float64', name=None)
+ expected.iloc[0:3] = np.inf
+
+ result = first / second
+ tm.assert_series_equal(result, expected)
+ assert not result.equals(second / first)
+
+ def test_rdiv_zero_compat(self):
+ # GH#8674
+ zero_array = np.array([0] * 5)
+ data = np.random.randn(5)
+ expected = Series([0.] * 5)
+
+ result = zero_array / Series(data)
+ tm.assert_series_equal(result, expected)
+
+ result = Series(zero_array) / data
+ tm.assert_series_equal(result, expected)
+
+ result = Series(zero_array) / Series(data)
+ tm.assert_series_equal(result, expected)
+
+ def test_div_zero_inf_signs(self):
+ # GH#9144, inf signing
+ ser = Series([-1, 0, 1], name='first')
+ expected = Series([-np.inf, np.nan, np.inf], name='first')
+
+ result = ser / 0
+ tm.assert_series_equal(result, expected)
+
+ def test_rdiv_zero(self):
+ # GH#9144
+ ser = Series([-1, 0, 1], name='first')
+ expected = Series([0.0, np.nan, 0.0], name='first')
+
+ result = 0 / ser
+ tm.assert_series_equal(result, expected)
+
+ def test_floordiv_div(self):
+ # GH#9144
+ ser = Series([-1, 0, 1], name='first')
+
+ result = ser // 0
+ expected = Series([-np.inf, np.nan, np.inf], name='first')
+ tm.assert_series_equal(result, expected)
+
+ def test_df_div_zero_df(self):
+ # integer div, but deal with the 0's (GH#9144)
+ df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
+ result = df / df
+
+ first = pd.Series([1.0, 1.0, 1.0, 1.0])
+ second = pd.Series([np.nan, np.nan, np.nan, 1])
+ expected = pd.DataFrame({'first': first, 'second': second})
+ tm.assert_frame_equal(result, expected)
+
+ def test_df_div_zero_array(self):
+ # integer div, but deal with the 0's (GH#9144)
+ df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
+
+ first = pd.Series([1.0, 1.0, 1.0, 1.0])
+ second = pd.Series([np.nan, np.nan, np.nan, 1])
+ expected = pd.DataFrame({'first': first, 'second': second})
+
+ with np.errstate(all='ignore'):
+ arr = df.values.astype('float') / df.values
+ result = pd.DataFrame(arr, index=df.index,
+ columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+ def test_df_div_zero_int(self):
+ # integer div, but deal with the 0's (GH#9144)
+ df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
+
+ result = df / 0
+ expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
+ expected.iloc[0:3, 1] = np.nan
+ tm.assert_frame_equal(result, expected)
+
+ # numpy has a slightly different (wrong) treatment
+ with np.errstate(all='ignore'):
+ arr = df.values.astype('float64') / 0
+ result2 = pd.DataFrame(arr, index=df.index,
+ columns=df.columns)
+ tm.assert_frame_equal(result2, expected)
+
+ def test_df_div_zero_series_does_not_commute(self):
+ # integer div, but deal with the 0's (GH#9144)
+ df = pd.DataFrame(np.random.randn(10, 5))
+ ser = df[0]
+ res = ser / df
+ res2 = df / ser
+ assert not res.fillna(0).equals(res2.fillna(0))
+
+ # ------------------------------------------------------------------
+ # Mod By Zero
+
+ def test_df_mod_zero_df(self):
+ # GH#3590, modulo as ints
+ df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
+
+ # this is technically wrong, as the integer portion is coerced to float
+ # ###
+ first = pd.Series([0, 0, 0, 0], dtype='float64')
+ second = pd.Series([np.nan, np.nan, np.nan, 0])
+ expected = pd.DataFrame({'first': first, 'second': second})
+ result = df % df
+ tm.assert_frame_equal(result, expected)
+
+ def test_df_mod_zero_array(self):
+ # GH#3590, modulo as ints
+ df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
+
+ # this is technically wrong, as the integer portion is coerced to float
+ # ###
+ first = pd.Series([0, 0, 0, 0], dtype='float64')
+ second = pd.Series([np.nan, np.nan, np.nan, 0])
+ expected = pd.DataFrame({'first': first, 'second': second})
+
+ # numpy has a slightly different (wrong) treatment
+ with np.errstate(all='ignore'):
+ arr = df.values % df.values
+ result2 = pd.DataFrame(arr, index=df.index,
+ columns=df.columns, dtype='float64')
+ result2.iloc[0:3, 1] = np.nan
+ tm.assert_frame_equal(result2, expected)
+
+ def test_df_mod_zero_int(self):
+ # GH#3590, modulo as ints
+ df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
+
+ result = df % 0
+ expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+ # numpy has a slightly different (wrong) treatment
+ with np.errstate(all='ignore'):
+ arr = df.values.astype('float64') % 0
+ result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
+ tm.assert_frame_equal(result2, expected)
+
+ def test_df_mod_zero_series_does_not_commute(self):
+ # GH#3590, modulo as ints
+ # not commutative with series
+ df = pd.DataFrame(np.random.randn(10, 5))
+ ser = df[0]
+ res = ser % df
+ res2 = df % ser
+ assert not res.fillna(0).equals(res2.fillna(0))
| The division by zero tests aren't yet parametrized, are mostly just moved as-is. Both these and the newly moved m8[ns] tests will need a bunch of de-duplication in the next pass.
Tons of DataFrame tests failing. #22074 is the PR we need, though maybe not the one we deserve.
Started in on fixtures. | https://api.github.com/repos/pandas-dev/pandas/pulls/22153 | 2018-08-01T04:29:43Z | 2018-08-07T18:00:46Z | 2018-08-07T18:00:46Z | 2018-08-07T18:04:25Z |
implement tslibs/src to make tslibs self-contained | diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 8c7b92ddeaa81..a5e93640742aa 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -47,8 +47,8 @@ Numeric decoder derived from from TCL library
#include <numpy/npy_math.h> // NOLINT(build/include_order)
#include <stdio.h> // NOLINT(build/include_order)
#include <ultrajson.h> // NOLINT(build/include_order)
-#include <np_datetime.h> // NOLINT(build/include_order)
-#include <np_datetime_strings.h> // NOLINT(build/include_order)
+#include <../../../tslibs/src/datetime/np_datetime.h> // NOLINT(build/include_order)
+#include <../../../tslibs/src/datetime/np_datetime_strings.h> // NOLINT(build/include_order)
#include "datetime.h"
static PyObject *type_decimal;
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c
similarity index 99%
rename from pandas/_libs/src/datetime/np_datetime.c
rename to pandas/_libs/tslibs/src/datetime/np_datetime.c
index 663ec66a35db2..1b33f38441253 100644
--- a/pandas/_libs/src/datetime/np_datetime.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c
@@ -16,6 +16,10 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#define NO_IMPORT
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
#include <Python.h>
#include <datetime.h>
diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/tslibs/src/datetime/np_datetime.h
similarity index 96%
rename from pandas/_libs/src/datetime/np_datetime.h
rename to pandas/_libs/tslibs/src/datetime/np_datetime.h
index 04009c6581ac0..9fa85b18dd219 100644
--- a/pandas/_libs/src/datetime/np_datetime.h
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime.h
@@ -17,6 +17,10 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#ifndef PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
#define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
#include <numpy/ndarraytypes.h>
#include <datetime.h>
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
similarity index 99%
rename from pandas/_libs/src/datetime/np_datetime_strings.c
rename to pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
index fa96cce1756c8..19ade6fa5add9 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
@@ -22,6 +22,10 @@ This file implements string parsing and creation for NumPy datetime.
#define PY_SSIZE_T_CLEAN
#define NO_IMPORT
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
#include <Python.h>
#include <time.h>
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h
similarity index 96%
rename from pandas/_libs/src/datetime/np_datetime_strings.h
rename to pandas/_libs/tslibs/src/datetime/np_datetime_strings.h
index 821bb79b345bd..e9a7fd74b05e5 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.h
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h
@@ -22,6 +22,10 @@ This file implements string parsing and creation for NumPy datetime.
#ifndef PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
#define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
/*
* Parses (almost) standard ISO 8601 date strings. The differences are:
*
diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/tslibs/src/period_helper.c
similarity index 96%
rename from pandas/_libs/src/period_helper.c
rename to pandas/_libs/tslibs/src/period_helper.c
index 7dab77131c1a0..4bf3774e35a68 100644
--- a/pandas/_libs/src/period_helper.c
+++ b/pandas/_libs/tslibs/src/period_helper.c
@@ -13,8 +13,12 @@ frequency conversion routines.
See end of file for stuff pandas uses (search for 'pandas').
*/
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
#include "period_helper.h"
-#include "../datetime/np_datetime.h"
+#include "datetime/np_datetime.h"
/* ------------------------------------------------------------------
* Code derived from scikits.timeseries
@@ -79,9 +83,9 @@ static npy_int64 daytime_conversion_factor_matrix[7][7] = {
int max_value(int a, int b) { return a > b ? a : b; }
-PANDAS_INLINE int min_value(int a, int b) { return a < b ? a : b; }
+static int min_value(int a, int b) { return a < b ? a : b; }
-PANDAS_INLINE int get_freq_group(int freq) { return (freq / 1000) * 1000; }
+static int get_freq_group(int freq) { return (freq / 1000) * 1000; }
npy_int64 get_daytime_conversion_factor(int from_index, int to_index) {
@@ -97,8 +101,7 @@ npy_int64 get_daytime_conversion_factor(int from_index, int to_index) {
return daytime_conversion_factor_matrix[row - 6][col - 6];
}
-PANDAS_INLINE npy_int64 upsample_daytime(npy_int64 ordinal,
- asfreq_info *af_info) {
+static npy_int64 upsample_daytime(npy_int64 ordinal, asfreq_info *af_info) {
if (af_info->is_end) {
return (ordinal + 1) * af_info->intraday_conversion_factor - 1;
} else {
@@ -106,15 +109,14 @@ PANDAS_INLINE npy_int64 upsample_daytime(npy_int64 ordinal,
}
}
-PANDAS_INLINE npy_int64 downsample_daytime(npy_int64 ordinal,
- asfreq_info *af_info) {
+static npy_int64 downsample_daytime(npy_int64 ordinal, asfreq_info *af_info) {
return ordinal / (af_info->intraday_conversion_factor);
}
-PANDAS_INLINE npy_int64 transform_via_day(npy_int64 ordinal,
- asfreq_info *af_info,
- freq_conv_func first_func,
- freq_conv_func second_func) {
+static npy_int64 transform_via_day(npy_int64 ordinal,
+ asfreq_info *af_info,
+ freq_conv_func first_func,
+ freq_conv_func second_func) {
npy_int64 result;
result = (*first_func)(ordinal, af_info);
diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/tslibs/src/period_helper.h
similarity index 97%
rename from pandas/_libs/src/period_helper.h
rename to pandas/_libs/tslibs/src/period_helper.h
index 8f538b261db9e..f0198935bd421 100644
--- a/pandas/_libs/src/period_helper.h
+++ b/pandas/_libs/tslibs/src/period_helper.h
@@ -14,9 +14,11 @@ frequency conversion routines.
#ifndef PANDAS__LIBS_SRC_PERIOD_HELPER_H_
#define PANDAS__LIBS_SRC_PERIOD_HELPER_H_
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
#include <Python.h>
-#include "headers/stdint.h"
-#include "helper.h"
#include "limits.h"
#include "numpy/ndarraytypes.h"
@@ -74,7 +76,7 @@ frequency conversion routines.
#define FR_UND -10000 /* Undefined */
-#define INT_ERR_CODE INT32_MIN
+#define INT_ERR_CODE NPY_MIN_INT32
typedef struct asfreq_info {
int is_end;
diff --git a/setup.py b/setup.py
index f058c8a6e3c99..3289f1e99b87f 100755
--- a/setup.py
+++ b/setup.py
@@ -226,15 +226,15 @@ def initialize_options(self):
self._clean_trees = []
base = pjoin('pandas', '_libs', 'src')
- dt = pjoin(base, 'datetime')
- src = base
+ tsbase = pjoin('pandas', '_libs', 'tslibs', 'src')
+ dt = pjoin(tsbase, 'datetime')
util = pjoin('pandas', 'util')
parser = pjoin(base, 'parser')
ujson_python = pjoin(base, 'ujson', 'python')
ujson_lib = pjoin(base, 'ujson', 'lib')
self._clean_exclude = [pjoin(dt, 'np_datetime.c'),
pjoin(dt, 'np_datetime_strings.c'),
- pjoin(src, 'period_helper.c'),
+ pjoin(tsbase, 'period_helper.c'),
pjoin(parser, 'tokenizer.c'),
pjoin(parser, 'io.c'),
pjoin(ujson_python, 'ujson.c'),
@@ -498,16 +498,19 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
lib_depends = []
common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src']
+ts_include = ['pandas/_libs/tslibs/src']
lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h',
'pandas/_libs/src/parse_helper.h',
'pandas/_libs/src/compat_helper.h']
-np_datetime_headers = ['pandas/_libs/src/datetime/np_datetime.h',
- 'pandas/_libs/src/datetime/np_datetime_strings.h']
-np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c',
- 'pandas/_libs/src/datetime/np_datetime_strings.c']
+np_datetime_headers = [
+ 'pandas/_libs/tslibs/src/datetime/np_datetime.h',
+ 'pandas/_libs/tslibs/src/datetime/np_datetime_strings.h']
+np_datetime_sources = [
+ 'pandas/_libs/tslibs/src/datetime/np_datetime.c',
+ 'pandas/_libs/tslibs/src/datetime/np_datetime_strings.c']
tseries_depends = np_datetime_headers
@@ -520,13 +523,16 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
'pyxfile': '_libs/groupby',
'depends': _pxi_dep['groupby']},
'_libs.hashing': {
- 'pyxfile': '_libs/hashing'},
+ 'pyxfile': '_libs/hashing',
+ 'include': [],
+ 'depends': []},
'_libs.hashtable': {
'pyxfile': '_libs/hashtable',
'depends': (['pandas/_libs/src/klib/khash_python.h'] +
_pxi_dep['hashtable'])},
'_libs.index': {
'pyxfile': '_libs/index',
+ 'include': common_include + ts_include,
'depends': _pxi_dep['index'],
'sources': np_datetime_sources},
'_libs.indexing': {
@@ -541,9 +547,11 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
'depends': _pxi_dep['join']},
'_libs.lib': {
'pyxfile': '_libs/lib',
+ 'include': common_include + ts_include,
'depends': lib_depends + tseries_depends},
'_libs.missing': {
'pyxfile': '_libs/missing',
+ 'include': common_include + ts_include,
'depends': tseries_depends},
'_libs.parsers': {
'pyxfile': '_libs/parsers',
@@ -570,54 +578,71 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
'depends': _pxi_dep['sparse']},
'_libs.tslib': {
'pyxfile': '_libs/tslib',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.ccalendar': {
- 'pyxfile': '_libs/tslibs/ccalendar'},
+ 'pyxfile': '_libs/tslibs/ccalendar',
+ 'include': []},
'_libs.tslibs.conversion': {
'pyxfile': '_libs/tslibs/conversion',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.fields': {
'pyxfile': '_libs/tslibs/fields',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.frequencies': {
- 'pyxfile': '_libs/tslibs/frequencies'},
+ 'pyxfile': '_libs/tslibs/frequencies',
+ 'include': []},
'_libs.tslibs.nattype': {
- 'pyxfile': '_libs/tslibs/nattype'},
+ 'pyxfile': '_libs/tslibs/nattype',
+ 'include': []},
'_libs.tslibs.np_datetime': {
'pyxfile': '_libs/tslibs/np_datetime',
+ 'include': ts_include,
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.offsets': {
'pyxfile': '_libs/tslibs/offsets',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.parsing': {
- 'pyxfile': '_libs/tslibs/parsing'},
+ 'pyxfile': '_libs/tslibs/parsing',
+ 'include': []},
'_libs.tslibs.period': {
'pyxfile': '_libs/tslibs/period',
- 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'],
- 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']},
+ 'include': ts_include,
+ 'depends': tseries_depends + [
+ 'pandas/_libs/tslibs/src/period_helper.h'],
+ 'sources': np_datetime_sources + [
+ 'pandas/_libs/tslibs/src/period_helper.c']},
'_libs.tslibs.resolution': {
'pyxfile': '_libs/tslibs/resolution',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.strptime': {
'pyxfile': '_libs/tslibs/strptime',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timedeltas': {
'pyxfile': '_libs/tslibs/timedeltas',
+ 'include': ts_include,
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.timestamps': {
'pyxfile': '_libs/tslibs/timestamps',
+ 'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timezones': {
- 'pyxfile': '_libs/tslibs/timezones'},
+ 'pyxfile': '_libs/tslibs/timezones',
+ 'include': []},
'_libs.testing': {
'pyxfile': '_libs/testing'},
'_libs.window': {
| https://api.github.com/repos/pandas-dev/pandas/pulls/22152 | 2018-08-01T02:16:09Z | 2018-08-02T10:18:16Z | 2018-08-02T10:18:16Z | 2018-08-08T15:51:40Z | |
Catch IntervalIndex.itemsize deprecation warning in tests and remove IntervalArray.itemsize | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 60464bcfda1e7..76614454e5a10 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -688,10 +688,6 @@ def size(self):
def shape(self):
return self.left.shape
- @property
- def itemsize(self):
- return self.left.itemsize + self.right.itemsize
-
def take(self, indices, allow_fill=False, fill_value=None, axis=None,
**kwargs):
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 0b467760d82d9..838b12468e85e 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -369,8 +369,14 @@ def shape(self):
@property
def itemsize(self):
- # Avoid materializing ndarray[Interval]
- return self._data.itemsize
+ msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
+ 'a future version')
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+
+ # supress the warning from the underlying left/right itemsize
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ return self.left.itemsize + self.right.itemsize
def __len__(self):
return len(self.left)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index e179286e839db..71f56c5bc1164 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -989,9 +989,11 @@ def test_itemsize(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
-
- result = IntervalIndex.from_arrays(left, right).itemsize
expected = 16 # 8 * 2
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = IntervalIndex.from_arrays(left, right).itemsize
+
assert result == expected
@pytest.mark.parametrize('new_closed', [
| - [X] closes #22049
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
I don't think a whatsnew entry is needed. This was already deprecated for `IntervalIndex`, but wasn't being caught in the tests, and was displaying the name from the underlying `left`/`right` values. Likewise, `IntervalArray` was created after the deprecation, so no reason to even add it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22149 | 2018-07-31T23:22:38Z | 2018-08-01T01:07:54Z | 2018-08-01T01:07:54Z | 2018-09-24T17:22:26Z |
use memoryviews instead of ndarrays | diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index ff92ee306288a..16cfde620d269 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -5,7 +5,7 @@
import cython
import numpy as np
-from numpy cimport ndarray, uint8_t, uint32_t, uint64_t
+from numpy cimport uint8_t, uint32_t, uint64_t
from util cimport _checknull
from cpython cimport (PyBytes_Check,
@@ -17,7 +17,7 @@ DEF dROUNDS = 4
@cython.boundscheck(False)
-def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'):
+def hash_object_array(object[:] arr, object key, object encoding='utf8'):
"""
Parameters
----------
@@ -37,7 +37,7 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'):
"""
cdef:
Py_ssize_t i, l, n
- ndarray[uint64_t] result
+ uint64_t[:] result
bytes data, k
uint8_t *kb
uint64_t *lens
@@ -89,7 +89,7 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'):
free(vecs)
free(lens)
- return result
+ return result.base # .base to retrieve underlying np.ndarray
cdef inline uint64_t _rotl(uint64_t x, uint64_t b) nogil:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 76e3d6e92d31e..eba553bfaeb48 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# cython: profile=False
-cimport cython
from cython cimport Py_ssize_t
from cpython cimport PyFloat_Check, PyUnicode_Check
@@ -37,8 +36,7 @@ from tslibs.np_datetime import OutOfBoundsDatetime
from tslibs.parsing import parse_datetime_string
from tslibs.timedeltas cimport cast_from_unit
-from tslibs.timezones cimport (is_utc, is_tzlocal, is_fixed_offset,
- treat_tz_as_pytz, get_dst_info)
+from tslibs.timezones cimport is_utc, is_tzlocal, get_dst_info
from tslibs.conversion cimport (tz_convert_single, _TSObject,
convert_datetime_to_tsobject,
get_datetime64_nanos,
@@ -77,8 +75,7 @@ cdef inline object create_time_from_ts(
return time(dts.hour, dts.min, dts.sec, dts.us)
-def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
- box="datetime"):
+def ints_to_pydatetime(int64_t[:] arr, tz=None, freq=None, box="datetime"):
"""
Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp
@@ -102,7 +99,9 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
cdef:
Py_ssize_t i, n = len(arr)
- ndarray[int64_t] trans, deltas
+ ndarray[int64_t] trans
+ int64_t[:] deltas
+ Py_ssize_t pos
npy_datetimestruct dts
object dt
int64_t value, delta
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 4335e7baeafe9..a459b185fa48c 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -91,7 +91,7 @@ def ensure_datetime64ns(ndarray arr, copy=True):
"""
cdef:
Py_ssize_t i, n = arr.size
- ndarray[int64_t] ivalues, iresult
+ int64_t[:] ivalues, iresult
NPY_DATETIMEUNIT unit
npy_datetimestruct dts
@@ -139,7 +139,7 @@ def ensure_timedelta64ns(ndarray arr, copy=True):
return arr.astype(TD_DTYPE, copy=copy)
-def datetime_to_datetime64(ndarray[object] values):
+def datetime_to_datetime64(object[:] values):
"""
Convert ndarray of datetime-like objects to int64 array representing
nanosecond timestamps.
@@ -156,7 +156,7 @@ def datetime_to_datetime64(ndarray[object] values):
cdef:
Py_ssize_t i, n = len(values)
object val, inferred_tz = None
- ndarray[int64_t] iresult
+ int64_t[:] iresult
npy_datetimestruct dts
_TSObject _ts
@@ -525,7 +525,8 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz):
Sets obj.tzinfo inplace, alters obj.dts inplace.
"""
cdef:
- ndarray[int64_t] trans, deltas
+ ndarray[int64_t] trans
+ int64_t[:] deltas
int64_t local_val
Py_ssize_t pos
@@ -631,15 +632,16 @@ cdef inline int64_t[:] _tz_convert_dst(ndarray[int64_t] values, tzinfo tz,
cdef:
Py_ssize_t n = len(values)
Py_ssize_t i, j, pos
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
- ndarray[int64_t] tt, trans, deltas
- ndarray[Py_ssize_t] posn
+ int64_t[:] result = np.empty(n, dtype=np.int64)
+ ndarray[int64_t] tt, trans
+ int64_t[:] deltas
+ Py_ssize_t[:] posn
int64_t v
trans, deltas, typ = get_dst_info(tz)
if not to_utc:
# We add `offset` below instead of subtracting it
- deltas = -1 * deltas
+ deltas = -1 * np.array(deltas, dtype='i8')
tt = values[values != NPY_NAT]
if not len(tt):
@@ -728,7 +730,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2):
converted: int64
"""
cdef:
- ndarray[int64_t] trans, deltas
+ int64_t[:] deltas
Py_ssize_t pos
int64_t v, offset, utc_date
npy_datetimestruct dts
@@ -756,7 +758,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2):
else:
# Convert UTC to other timezone
arr = np.array([utc_date])
- # Note: at least with cython 0.28.3, doing a looking `[0]` in the next
+ # Note: at least with cython 0.28.3, doing a lookup `[0]` in the next
# line is sensitive to the declared return type of _tz_convert_dst;
# if it is declared as returning ndarray[int64_t], a compile-time error
# is raised.
@@ -781,10 +783,9 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
"""
cdef:
- ndarray[int64_t] utc_dates, tt, result, trans, deltas
+ ndarray[int64_t] utc_dates, result
Py_ssize_t i, j, pos, n = len(vals)
- int64_t v, offset, delta
- npy_datetimestruct dts
+ int64_t v
if len(vals) == 0:
return np.array([], dtype=np.int64)
@@ -843,7 +844,8 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
localized : ndarray[int64_t]
"""
cdef:
- ndarray[int64_t] trans, deltas, idx_shifted
+ ndarray[int64_t] trans
+ int64_t[:] deltas, idx_shifted
ndarray ambiguous_array
Py_ssize_t i, idx, pos, ntrans, n = len(vals)
int64_t *tdata
@@ -1069,7 +1071,7 @@ def normalize_date(object dt):
@cython.wraparound(False)
@cython.boundscheck(False)
-def normalize_i8_timestamps(ndarray[int64_t] stamps, tz=None):
+def normalize_i8_timestamps(int64_t[:] stamps, tz=None):
"""
Normalize each of the (nanosecond) timestamps in the given array by
rounding down to the beginning of the day (i.e. midnight). If `tz`
@@ -1087,7 +1089,7 @@ def normalize_i8_timestamps(ndarray[int64_t] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
npy_datetimestruct dts
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
+ int64_t[:] result = np.empty(n, dtype=np.int64)
if tz is not None:
tz = maybe_get_tz(tz)
@@ -1101,12 +1103,12 @@ def normalize_i8_timestamps(ndarray[int64_t] stamps, tz=None):
dt64_to_dtstruct(stamps[i], &dts)
result[i] = _normalized_stamp(&dts)
- return result
+ return result.base # .base to access underlying np.ndarray
@cython.wraparound(False)
@cython.boundscheck(False)
-cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz):
+cdef int64_t[:] _normalize_local(int64_t[:] stamps, object tz):
"""
Normalize each of the (nanosecond) timestamps in the given array by
rounding down to the beginning of the day (i.e. midnight) for the
@@ -1123,8 +1125,9 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz):
"""
cdef:
Py_ssize_t n = len(stamps)
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
- ndarray[int64_t] trans, deltas
+ int64_t[:] result = np.empty(n, dtype=np.int64)
+ ndarray[int64_t] trans
+ int64_t[:] deltas
Py_ssize_t[:] pos
npy_datetimestruct dts
int64_t delta
@@ -1190,7 +1193,7 @@ cdef inline int64_t _normalized_stamp(npy_datetimestruct *dts) nogil:
return dtstruct_to_dt64(dts)
-def is_date_array_normalized(ndarray[int64_t] stamps, tz=None):
+def is_date_array_normalized(int64_t[:] stamps, tz=None):
"""
Check if all of the given (nanosecond) timestamps are normalized to
midnight, i.e. hour == minute == second == 0. If the optional timezone
@@ -1206,8 +1209,9 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None):
is_normalized : bool True if all stamps are normalized
"""
cdef:
- Py_ssize_t i, n = len(stamps)
- ndarray[int64_t] trans, deltas
+ Py_ssize_t pos, i, n = len(stamps)
+ ndarray[int64_t] trans
+ int64_t[:] deltas
npy_datetimestruct dts
int64_t local_val, delta
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index a298f521ef853..96f023f7fdafe 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -85,8 +85,7 @@ def build_field_sarray(ndarray[int64_t] dtindex):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_date_name_field(ndarray[int64_t] dtindex, object field,
- object locale=None):
+def get_date_name_field(int64_t[:] dtindex, object field, object locale=None):
"""
Given a int64-based datetime index, return array of strings of date
name based on requested field (e.g. weekday_name)
@@ -134,7 +133,7 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field,
@cython.wraparound(False)
-def get_start_end_field(ndarray[int64_t] dtindex, object field,
+def get_start_end_field(int64_t[:] dtindex, object field,
object freqstr=None, int month_kw=12):
"""
Given an int64-based datetime index return array of indicators
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index ffa3d8df44be8..afda2046fd12d 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -14,7 +14,6 @@ from cpython.datetime cimport datetime
import time
import numpy as np
-from numpy cimport ndarray
# Avoid import from outside _libs
if sys.version_info.major == 2:
@@ -381,11 +380,11 @@ cpdef object _get_rule_month(object source, object default='DEC'):
# Parsing for type-inference
-def try_parse_dates(ndarray[object] values, parser=None,
+def try_parse_dates(object[:] values, parser=None,
dayfirst=False, default=None):
cdef:
Py_ssize_t i, n
- ndarray[object] result
+ object[:] result
n = len(values)
result = np.empty(n, dtype='O')
@@ -420,15 +419,15 @@ def try_parse_dates(ndarray[object] values, parser=None,
# raise if passed parser and it failed
raise
- return result
+ return result.base # .base to access underlying ndarray
-def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times,
+def try_parse_date_and_time(object[:] dates, object[:] times,
date_parser=None, time_parser=None,
dayfirst=False, default=None):
cdef:
Py_ssize_t i, n
- ndarray[object] result
+ object[:] result
n = len(dates)
if len(times) != n:
@@ -457,14 +456,14 @@ def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times,
result[i] = datetime(d.year, d.month, d.day,
t.hour, t.minute, t.second)
- return result
+ return result.base # .base to access underlying ndarray
-def try_parse_year_month_day(ndarray[object] years, ndarray[object] months,
- ndarray[object] days):
+def try_parse_year_month_day(object[:] years, object[:] months,
+ object[:] days):
cdef:
Py_ssize_t i, n
- ndarray[object] result
+ object[:] result
n = len(years)
if len(months) != n or len(days) != n:
@@ -474,19 +473,19 @@ def try_parse_year_month_day(ndarray[object] years, ndarray[object] months,
for i in range(n):
result[i] = datetime(int(years[i]), int(months[i]), int(days[i]))
- return result
+ return result.base # .base to access underlying ndarray
-def try_parse_datetime_components(ndarray[object] years,
- ndarray[object] months,
- ndarray[object] days,
- ndarray[object] hours,
- ndarray[object] minutes,
- ndarray[object] seconds):
+def try_parse_datetime_components(object[:] years,
+ object[:] months,
+ object[:] days,
+ object[:] hours,
+ object[:] minutes,
+ object[:] seconds):
cdef:
Py_ssize_t i, n
- ndarray[object] result
+ object[:] result
int secs
double float_secs
double micros
@@ -509,7 +508,7 @@ def try_parse_datetime_components(ndarray[object] years,
int(hours[i]), int(minutes[i]), secs,
int(micros))
- return result
+ return result.base # .base to access underlying ndarray
# ----------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 96d7994bdc822..811f0d25c3838 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -423,13 +423,13 @@ cdef inline int month_to_quarter(int month):
@cython.wraparound(False)
@cython.boundscheck(False)
-def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
+def dt64arr_to_periodarr(int64_t[:] dtarr, int freq, tz=None):
"""
Convert array of datetime64 values (passed in as 'i8' dtype) to a set of
periods corresponding to desired frequency, per period convention.
"""
cdef:
- ndarray[int64_t] out
+ int64_t[:] out
Py_ssize_t i, l
npy_datetimestruct dts
@@ -447,18 +447,18 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
out[i] = get_period_ordinal(&dts, freq)
else:
out = localize_dt64arr_to_period(dtarr, freq, tz)
- return out
+ return out.base # .base to access underlying np.ndarray
@cython.wraparound(False)
@cython.boundscheck(False)
-def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq):
+def periodarr_to_dt64arr(int64_t[:] periodarr, int freq):
"""
Convert array to datetime64 values from a set of ordinals corresponding to
periods per period convention.
"""
cdef:
- ndarray[int64_t] out
+ int64_t[:] out
Py_ssize_t i, l
l = len(periodarr)
@@ -472,7 +472,7 @@ def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq):
continue
out[i] = period_ordinal_to_dt64(periodarr[i], freq)
- return out
+ return out.base # .base to access underlying np.ndarray
cpdef int64_t period_asfreq(int64_t ordinal, int freq1, int freq2, bint end):
@@ -556,7 +556,7 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
if upsampling, choose to use start ('S') or end ('E') of period.
"""
cdef:
- ndarray[int64_t] result
+ int64_t[:] result
Py_ssize_t i, n
freq_conv_func func
asfreq_info af_info
@@ -584,7 +584,7 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
raise ValueError("Unable to convert to desired frequency.")
result[i] = val
- return result
+ return result.base # .base to access underlying np.ndarray
cpdef int64_t period_ordinal(int y, int m, int d, int h, int min,
@@ -825,10 +825,10 @@ cdef int pdays_in_month(int64_t ordinal, int freq):
return ccalendar.get_days_in_month(dts.year, dts.month)
-def get_period_field_arr(int code, ndarray[int64_t] arr, int freq):
+def get_period_field_arr(int code, int64_t[:] arr, int freq):
cdef:
Py_ssize_t i, sz
- ndarray[int64_t] out
+ int64_t[:] out
accessor f
func = _get_accessor_func(code)
@@ -844,7 +844,7 @@ def get_period_field_arr(int code, ndarray[int64_t] arr, int freq):
continue
out[i] = func(arr[i], freq)
- return out
+ return out.base # .base to access underlying np.ndarray
cdef accessor _get_accessor_func(int code):
@@ -875,10 +875,10 @@ cdef accessor _get_accessor_func(int code):
return NULL
-def extract_ordinals(ndarray[object] values, freq):
+def extract_ordinals(object[:] values, freq):
cdef:
Py_ssize_t i, n = len(values)
- ndarray[int64_t] ordinals = np.empty(n, dtype=np.int64)
+ int64_t[:] ordinals = np.empty(n, dtype=np.int64)
object p
freqstr = Period._maybe_convert_freq(freq).freqstr
@@ -904,10 +904,10 @@ def extract_ordinals(ndarray[object] values, freq):
else:
ordinals[i] = p.ordinal
- return ordinals
+ return ordinals.base # .base to access underlying np.ndarray
-def extract_freq(ndarray[object] values):
+def extract_freq(object[:] values):
cdef:
Py_ssize_t i, n = len(values)
object p
@@ -930,12 +930,13 @@ def extract_freq(ndarray[object] values):
@cython.wraparound(False)
@cython.boundscheck(False)
-cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
- int freq, object tz):
+cdef int64_t[:] localize_dt64arr_to_period(int64_t[:] stamps,
+ int freq, object tz):
cdef:
Py_ssize_t n = len(stamps)
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
- ndarray[int64_t] trans, deltas
+ int64_t[:] result = np.empty(n, dtype=np.int64)
+ ndarray[int64_t] trans
+ int64_t[:] deltas
Py_ssize_t[:] pos
npy_datetimestruct dts
int64_t local_val
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx
index 0659e2a553e7e..18cc21ccd59e0 100644
--- a/pandas/_libs/tslibs/resolution.pyx
+++ b/pandas/_libs/tslibs/resolution.pyx
@@ -31,7 +31,7 @@ cdef int RESO_DAY = 6
# ----------------------------------------------------------------------
-cpdef resolution(ndarray[int64_t] stamps, tz=None):
+cpdef resolution(int64_t[:] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
npy_datetimestruct dts
@@ -42,11 +42,12 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None):
return _reso_local(stamps, tz)
-cdef _reso_local(ndarray[int64_t] stamps, object tz):
+cdef _reso_local(int64_t[:] stamps, object tz):
cdef:
Py_ssize_t i, n = len(stamps)
int reso = RESO_DAY, curr_reso
- ndarray[int64_t] trans, deltas
+ ndarray[int64_t] trans
+ int64_t[:] deltas
Py_ssize_t[:] pos
npy_datetimestruct dts
int64_t local_val, delta
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index de2b7440156a7..59d673881bb40 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -26,7 +26,7 @@ from cython cimport Py_ssize_t
from cpython cimport PyFloat_Check
import numpy as np
-from numpy cimport ndarray, int64_t
+from numpy cimport int64_t
from datetime import date as datetime_date
@@ -60,7 +60,7 @@ cdef dict _parse_code_table = {'y': 0,
'z': 19}
-def array_strptime(ndarray[object] values, object fmt,
+def array_strptime(object[:] values, object fmt,
bint exact=True, errors='raise'):
"""
Calculates the datetime structs represented by the passed array of strings
@@ -76,8 +76,8 @@ def array_strptime(ndarray[object] values, object fmt,
cdef:
Py_ssize_t i, n = len(values)
npy_datetimestruct dts
- ndarray[int64_t] iresult
- ndarray[object] result_timezone
+ int64_t[:] iresult
+ object[:] result_timezone
int year, month, day, minute, hour, second, weekday, julian
int week_of_year, week_of_year_start, parse_code, ordinal
int64_t us, ns
@@ -320,7 +320,7 @@ def array_strptime(ndarray[object] values, object fmt,
result_timezone[i] = timezone
- return result, result_timezone
+ return result, result_timezone.base
"""_getlang, LocaleTime, TimeRE, _calc_julian_from_U_or_W are vendored
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd
index 3e7b88b208e89..2413c281e0a52 100644
--- a/pandas/_libs/tslibs/timedeltas.pxd
+++ b/pandas/_libs/tslibs/timedeltas.pxd
@@ -3,11 +3,11 @@
from cpython.datetime cimport timedelta
-from numpy cimport int64_t, ndarray
+from numpy cimport int64_t
# Exposed for tslib, not intended for outside use.
cdef parse_timedelta_string(object ts)
cpdef int64_t cast_from_unit(object ts, object unit) except? -1
cpdef int64_t delta_to_nanoseconds(delta) except? -1
cpdef convert_to_timedelta64(object ts, object unit)
-cpdef array_to_timedelta64(ndarray[object] values, unit=*, errors=*)
+cpdef array_to_timedelta64(object[:] values, unit=*, errors=*)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index f7a6cf0c6dafc..9e7f1d94934ba 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -13,7 +13,7 @@ from cpython cimport PyUnicode_Check, Py_NE, Py_EQ, PyObject_RichCompare
import numpy as np
cimport numpy as cnp
-from numpy cimport int64_t, ndarray
+from numpy cimport int64_t
cnp.import_array()
from cpython.datetime cimport (datetime, timedelta,
@@ -83,7 +83,7 @@ _no_input = object()
# ----------------------------------------------------------------------
# API
-def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
+def ints_to_pytimedelta(int64_t[:] arr, box=False):
"""
convert an i8 repr to an ndarray of timedelta or Timedelta (if box ==
True)
@@ -101,7 +101,7 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
cdef:
Py_ssize_t i, n = len(arr)
int64_t value
- ndarray[object] result = np.empty(n, dtype=object)
+ object[:] result = np.empty(n, dtype=object)
for i in range(n):
@@ -114,7 +114,7 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
else:
result[i] = timedelta(microseconds=int(value) / 1000)
- return result
+ return result.base # .base to access underlying np.ndarray
# ----------------------------------------------------------------------
@@ -199,7 +199,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
return ts.astype('timedelta64[ns]')
-cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
+cpdef array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
"""
Convert an ndarray to an array of timedeltas. If errors == 'coerce',
coerce non-convertible objects to NaT. Otherwise, raise.
@@ -207,7 +207,7 @@ cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
cdef:
Py_ssize_t i, n
- ndarray[int64_t] iresult
+ int64_t[:] iresult
if errors not in ('ignore', 'raise', 'coerce'):
raise ValueError("errors must be one of 'ignore', "
@@ -233,7 +233,7 @@ cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
else:
raise
- return iresult
+ return iresult.base # .base to access underlying np.ndarray
cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index be988e7247e59..eb5c0076a868a 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -7,7 +7,7 @@ from cpython cimport (PyObject_RichCompareBool, PyObject_RichCompare,
import numpy as np
cimport numpy as cnp
-from numpy cimport int64_t, int32_t, ndarray
+from numpy cimport int64_t, int32_t, int8_t
cnp.import_array()
from datetime import time as datetime_time
@@ -342,7 +342,7 @@ cdef class _Timestamp(datetime):
cdef:
int64_t val
dict kwds
- ndarray out
+ int8_t out[1]
int month_kw
freq = self.freq
@@ -362,7 +362,7 @@ cdef class _Timestamp(datetime):
cpdef _get_date_name_field(self, object field, object locale):
cdef:
int64_t val
- ndarray out
+ object[:] out
val = self._maybe_convert_value_to_local()
out = get_date_name_field(np.array([val], dtype=np.int64),
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 2e3b07252d45e..a787452d90c07 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# cython: profile=False
-cimport cython
from cython cimport Py_ssize_t
# dateutil compat
@@ -19,7 +18,7 @@ UTC = pytz.utc
import numpy as np
cimport numpy as cnp
-from numpy cimport ndarray, int64_t
+from numpy cimport int64_t
cnp.import_array()
# ----------------------------------------------------------------------
@@ -188,10 +187,10 @@ cdef object get_utc_trans_times_from_dateutil_tz(object tz):
return new_trans
-cpdef ndarray[int64_t, ndim=1] unbox_utcoffsets(object transinfo):
+cpdef int64_t[:] unbox_utcoffsets(object transinfo):
cdef:
Py_ssize_t i, sz
- ndarray[int64_t] arr
+ int64_t[:] arr
sz = len(transinfo)
arr = np.empty(sz, dtype='i8')
| For lookups and slicing memoryviews are supposed to be more performant than ndarrays. I haven't been able to detect any difference. @xhochy IIRC you've looked at this in some depth; are memoryviews unambiguously better for our purposes?
Getting rid of `from numpy cimport [...]` is necessary (but not sufficient) for getting rid of numpy 1.7 deprecation warnings. | https://api.github.com/repos/pandas-dev/pandas/pulls/22147 | 2018-07-31T21:27:59Z | 2018-08-01T21:53:24Z | 2018-08-01T21:53:24Z | 2018-08-01T22:16:39Z |
Replace (private) _checknull with (public) is_nan | diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index 16cfde620d269..a9775d3950187 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -3,14 +3,14 @@
# at https://github.com/veorq/SipHash
import cython
+from cpython cimport PyBytes_Check, PyUnicode_Check
+from libc.stdlib cimport malloc, free
import numpy as np
-from numpy cimport uint8_t, uint32_t, uint64_t
+from numpy cimport uint8_t, uint32_t, uint64_t, import_array
+import_array()
-from util cimport _checknull
-from cpython cimport (PyBytes_Check,
- PyUnicode_Check)
-from libc.stdlib cimport malloc, free
+from util cimport is_nan
DEF cROUNDS = 2
DEF dROUNDS = 4
@@ -65,7 +65,7 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'):
data = <bytes>val
elif PyUnicode_Check(val):
data = <bytes>val.encode(encoding)
- elif _checknull(val):
+ elif val is None or is_nan(val):
# null, stringify and encode
data = <bytes>str(val).encode(encoding)
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 40fdfedaa23d5..22153b58cc49b 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -12,6 +12,7 @@ from numpy cimport ndarray
cimport util
+util.import_array()
from tslibs import Timestamp
from tslibs.timezones cimport tz_compare
@@ -391,7 +392,7 @@ cpdef intervals_to_interval_bounds(ndarray intervals,
for i in range(len(intervals)):
interval = intervals[i]
- if util._checknull(interval):
+ if interval is None or util.is_nan(interval):
left[i] = np.nan
right[i] = np.nan
continue
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 4cc119a700ca0..d80b5fd2bd0b9 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -34,7 +34,7 @@ from missing cimport checknull
cimport util
cdef int64_t NPY_NAT = util.get_nat()
-from util cimport is_array, _checknull
+from util cimport is_array, is_nan
def values_from_object(object o):
@@ -429,7 +429,7 @@ cpdef bint array_equivalent_object(object[:] left, object[:] right):
# we are either not equal or both nan
# I think None == None will be true here
if not (PyObject_RichCompareBool(x, y, Py_EQ) or
- _checknull(x) and _checknull(y)):
+ (x is None or is_nan(x)) and (y is None or is_nan(y))):
return False
return True
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 6161a55b22342..e9c3cf12eb328 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -74,7 +74,7 @@ cpdef bint checknull(object val):
elif util.is_array(val):
return False
else:
- return util._checknull(val)
+ return val is None or util.is_nan(val)
cpdef bint checknull_old(object val):
@@ -113,7 +113,7 @@ cpdef bint checknull_old(object val):
elif util.is_array(val):
return False
else:
- return util._checknull(val)
+ return val is None or util.is_nan(val)
cdef inline bint _check_none_nan_inf_neginf(object val):
@@ -297,7 +297,7 @@ cpdef bint isneginf_scalar(object val):
cdef inline bint is_null_datetime64(v):
# determine if we have a null for a datetime (or integer versions),
# excluding np.timedelta64('nat')
- if util._checknull(v):
+ if v is None or util.is_nan(v):
return True
elif v is NaT:
return True
@@ -309,7 +309,7 @@ cdef inline bint is_null_datetime64(v):
cdef inline bint is_null_timedelta64(v):
# determine if we have a null for a timedelta (or integer versions),
# excluding np.datetime64('nat')
- if util._checknull(v):
+ if v is None or util.is_nan(v):
return True
elif v is NaT:
return True
@@ -321,7 +321,7 @@ cdef inline bint is_null_timedelta64(v):
cdef inline bint is_null_period(v):
# determine if we have a null for a Period (or integer versions),
# excluding np.datetime64('nat') and np.timedelta64('nat')
- if util._checknull(v):
+ if v is None or util.is_nan(v):
return True
elif v is NaT:
return True
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 72a0baa763c8a..ec9d8304f9243 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -10,10 +10,11 @@ cimport cython
from cython cimport Py_ssize_t
import numpy as np
-from numpy cimport ndarray, uint8_t
+from numpy cimport ndarray, uint8_t, import_array
+import_array()
-from util cimport UINT8_MAX, _checknull
+from util cimport UINT8_MAX, is_nan
from missing cimport checknull
@@ -190,13 +191,13 @@ def scalar_binop(ndarray[object] values, object val, object op):
object x
result = np.empty(n, dtype=object)
- if _checknull(val):
+ if val is None or is_nan(val):
result.fill(val)
return result
for i in range(n):
x = values[i]
- if _checknull(x):
+ if x is None or is_nan(x):
result[i] = x
else:
result[i] = op(x, val)
@@ -237,9 +238,9 @@ def vec_binop(ndarray[object] left, ndarray[object] right, object op):
try:
result[i] = op(x, y)
except TypeError:
- if _checknull(x):
+ if x is None or is_nan(x):
result[i] = x
- elif _checknull(y):
+ elif y is None or is_nan(y):
result[i] = y
else:
raise
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index a24e2cdd99f6f..fba7f210b34a1 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -53,7 +53,7 @@ from pandas.core.dtypes.common import (
pandas_dtype)
from pandas.core.arrays import Categorical
from pandas.core.dtypes.concat import union_categoricals
-import pandas.io.common as com
+import pandas.io.common as icom
from pandas.errors import (ParserError, DtypeWarning,
EmptyDataError, ParserWarning)
@@ -665,7 +665,8 @@ cdef class TextReader:
if b'utf-16' in (self.encoding or b''):
# we need to read utf-16 through UTF8Recoder.
# if source is utf-16, convert source to utf-8 by UTF8Recoder.
- source = com.UTF8Recoder(source, self.encoding.decode('utf-8'))
+ source = icom.UTF8Recoder(source,
+ self.encoding.decode('utf-8'))
self.encoding = b'utf-8'
self.c_encoding = <char*> self.encoding
@@ -1356,7 +1357,7 @@ cdef asbytes(object o):
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
-_NA_VALUES = _ensure_encoded(list(com._NA_VALUES))
+_NA_VALUES = _ensure_encoded(list(icom._NA_VALUES))
def _maybe_upcast(arr):
@@ -2247,7 +2248,7 @@ def sanitize_objects(ndarray[object] values, set na_values,
n = len(values)
onan = np.nan
- for i from 0 <= i < n:
+ for i in range(n):
val = values[i]
if (convert_empty and val == '') or (val in na_values):
values[i] = onan
diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx
index 23836ef7f4de9..eec0457fc4caf 100644
--- a/pandas/_libs/skiplist.pyx
+++ b/pandas/_libs/skiplist.pyx
@@ -5,6 +5,7 @@
# Link: http://code.activestate.com/recipes/576930/
# Cython version: Wes McKinney
+from random import random
from libc.math cimport log
@@ -17,8 +18,6 @@ cdef double Log2(double x):
return log(x) / log(2.)
-from random import random
-
# TODO: optimize this, make less messy
cdef class Node:
@@ -32,9 +31,11 @@ cdef class Node:
self.next = next
self.width = width
+
# Singleton terminator node
NIL = Node(np.inf, [], [])
+
cdef class IndexableSkiplist:
"""
Sorted collection supporting O(lg n) insertion, removal, and
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index 8cf69057a7e74..99846c2771ebe 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -393,7 +393,7 @@ def infer_dtype(object value, bint skipna=False):
# do not use is_nul_datetimelike to keep
# np.datetime64('nat') and np.timedelta64('nat')
- if util._checknull(val):
+ if val is None or util.is_nan(val):
pass
elif val is NaT:
seen_pdnat = True
@@ -522,7 +522,7 @@ cpdef object infer_datetimelike_array(object arr):
if len(objs) == 3:
break
- elif util._checknull(v):
+ elif v is None or util.is_nan(v):
# nan or None
pass
elif v is NaT:
@@ -660,7 +660,7 @@ cdef class Validator:
)
cdef bint is_valid_null(self, object value) except -1:
- return util._checknull(value)
+ return value is None or util.is_nan(value)
cdef bint is_array_typed(self) except -1:
return False
@@ -828,7 +828,7 @@ cdef class TemporalValidator(Validator):
cdef inline bint is_valid_skipna(self, object value) except -1:
cdef:
bint is_typed_null = self.is_valid_null(value)
- bint is_generic_null = util._checknull(value)
+ bint is_generic_null = value is None or util.is_nan(value)
self.generic_null_count += is_typed_null and is_generic_null
return self.is_value_typed(value) or is_typed_null or is_generic_null
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 20b43f9d5644b..25b1572cfe52f 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -586,8 +586,7 @@ NaT = NaTType()
cdef inline bint checknull_with_nat(object val):
""" utility to check if a value is a nat or not """
- return val is None or (
- PyFloat_Check(val) and val != val) or val is NaT
+ return val is None or util.is_nan(val) or val is NaT
cdef inline bint is_null_datetimelike(object val):
@@ -602,7 +601,7 @@ cdef inline bint is_null_datetimelike(object val):
-------
null_datetimelike : bool
"""
- if util._checknull(val):
+ if val is None or util.is_nan(val):
return True
elif val is NaT:
return True
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 8b8475cc3727b..eeb523e098910 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1369,7 +1369,7 @@ class Timedelta(_Timedelta):
'{op}'.format(dtype=other.dtype,
op='__floordiv__'))
- elif is_float_object(other) and util._checknull(other):
+ elif is_float_object(other) and util.is_nan(other):
# i.e. np.nan
return NotImplemented
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd
index 624ed7ced2654..0470202ee7d98 100644
--- a/pandas/_libs/tslibs/util.pxd
+++ b/pandas/_libs/tslibs/util.pxd
@@ -228,5 +228,16 @@ cdef inline bint is_offset_object(object val):
return getattr(val, '_typ', None) == "dateoffset"
-cdef inline bint _checknull(object val):
- return val is None or (PyFloat_Check(val) and val != val)
+cdef inline bint is_nan(object val):
+ """
+ Check if val is a Not-A-Number float, including float('NaN') and np.nan.
+
+ Parameters
+ ----------
+ val : object
+
+ Returns
+ -------
+ is_nan : bool
+ """
+ return is_float_object(val) and val != val
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index 041eb59812ae3..796f4b754857e 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -163,7 +163,7 @@ def string_array_replace_from_nan_rep(
if replace is None:
replace = np.nan
- for i from 0 <= i < length:
+ for i in range(length):
if arr[i] == nan_rep:
arr[i] = replace
| `util._checknull(x)` is equivalent to `x is None or isnan(x)`. On several occasions I have had to go remind myself what `_checknull` covers. It is much clearer to write out `x is None or util.is_nan(x)`, which is what this PR does.
In at least one place where `_checknull` is currently used, the `is None` part of it can be skipped.
Also gets rid of usages of private functions, and trades a no-docstring func for a nice-docstring func.
Small assorted cleanups along the way.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22146 | 2018-07-31T18:49:08Z | 2018-08-07T17:12:25Z | 2018-08-07T17:12:25Z | 2018-08-07T17:14:14Z |
Fix zillion deprecation warnings in core.dtypes.common | diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 905073645fcb3..8f7f19c3a0b9f 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -2023,7 +2023,9 @@ def pandas_dtype(dtype):
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
- if dtype in [object, np.object_, 'object', 'O']:
+ if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']:
+ # check hashability to avoid errors/DeprecationWarning when we get
+ # here and `dtype` is an array
return npdtype
elif npdtype.kind == 'O':
raise TypeError("dtype '{}' not understood".format(dtype))
| Looking through a recent Travis log I see tons of these:
```
/home/travis/build/pandas-dev/pandas/pandas/core/dtypes/common.py:2026: DeprecationWarning: elementwise == comparison failed; this will raise an error in the future.
if dtype in [object, np.object_, 'object', 'O']:
```
This is driven by (probably not desired) cases where `dtype` is array-like at this point (also showed up in #22092). Easily avoided by checking for hashability before checking for inclusion. | https://api.github.com/repos/pandas-dev/pandas/pulls/22142 | 2018-07-31T03:48:25Z | 2018-08-01T01:05:58Z | 2018-08-01T01:05:58Z | 2018-08-08T15:52:25Z |
DOC: DataFrame.merge behaviour for suffix=(False, False) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 74f760f382c76..885139f72e3f0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -138,12 +138,11 @@
"""
_merge_doc = """
-Merge DataFrame or named Series objects by performing a database-style join
-operation by columns or indexes.
+Merge DataFrame or named Series objects with a database-style join.
-If joining columns on columns, the DataFrame indexes *will be
-ignored*. Otherwise if joining indexes on indexes or indexes on a column or
-columns, the index will be passed on.
+The join is done on columns or indexes. If joining columns on
+columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
+on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
@@ -153,13 +152,13 @@
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
- preserve key order
+ preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
- preserve key order
+ preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
- join; sort keys lexicographically
+ join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
- join; preserve the order of the left keys
+ join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
@@ -172,22 +171,23 @@
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
-left_index : boolean, default False
+left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
-right_index : boolean, default False
+right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
-sort : boolean, default False
+sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
-suffixes : 2-length sequence (tuple, list, ...)
+suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
- side, respectively.
-copy : boolean, default True
+ side, respectively. To raise an exception on overlapping columns use
+ (False, False).
+copy : bool, default True
If False, avoid copy if possible.
-indicator : boolean or string, default False
+indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
@@ -197,7 +197,7 @@
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
-validate : string, default None
+validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
@@ -213,6 +213,7 @@
Returns
-------
DataFrame
+ A DataFrame of the two merged objects.
Notes
-----
@@ -229,24 +230,27 @@
Examples
--------
->>> A = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
-... 'value': [1, 2, 3, 5]})
->>> B = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
-... 'value': [5, 6, 7, 8]})
->>> A
+>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
+... 'value': [1, 2, 3, 5]})
+>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
+... 'value': [5, 6, 7, 8]})
+>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
->>> B
+>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
->>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
+Merge df1 and df2 on the lkey and rkey columns. The value columns have
+the default suffixes, _x and _y, appended.
+
+>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
@@ -254,6 +258,28 @@
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
+
+Merge DataFrames df1 and df2 with specified left and right suffixes
+appended to any overlapping columns.
+
+>>> df1.merge(df2, left_on='lkey', right_on='rkey',
+... suffixes=('_left', '_right'))
+ lkey value_left rkey value_right
+0 foo 1 foo 5
+1 foo 1 foo 8
+2 foo 5 foo 5
+3 foo 5 foo 8
+4 bar 2 bar 6
+5 baz 3 baz 7
+
+Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
+any overlapping columns.
+
+>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
+Traceback (most recent call last):
+...
+ValueError: columns overlap but no suffix specified:
+ Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
| - [x] closes #22045
- N/A tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- N/A whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/22141 | 2018-07-31T03:23:52Z | 2018-09-04T15:04:56Z | 2018-09-04T15:04:55Z | 2018-09-04T16:50:21Z |
[TST] make xfails strict | diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index e082629a5433d..e5d620df96493 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -511,7 +511,8 @@ def test_construction_with_ordered(self):
cat = Categorical([0, 1, 2], ordered=True)
assert cat.ordered
- @pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
+ @pytest.mark.xfail(reason="Imaginary values not supported in Categorical",
+ strict=True)
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 4e27f1eca538f..307543eca2b3e 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -159,7 +159,8 @@ def test_setitem_frame_invalid_length(self, data):
with tm.assert_raises_regex(ValueError, xpr):
df['B'] = data[:5]
- @pytest.mark.xfail(reason="GH-20441: setitem on extension types.")
+ @pytest.mark.xfail(reason="GH#20441: setitem on extension types.",
+ strict=True)
def test_setitem_tuple_index(self, data):
s = pd.Series(data[:2], index=[(0, 0), (0, 1)])
expected = pd.Series(data.take([1, 1]), index=s.index)
diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/extension/integer/test_integer.py
index 451f7488bd38a..5e0f5bf0a5dcf 100644
--- a/pandas/tests/extension/integer/test_integer.py
+++ b/pandas/tests/extension/integer/test_integer.py
@@ -599,13 +599,17 @@ def test_construct_cast_invalid(self, dtype):
class TestGroupby(BaseInteger, base.BaseGroupbyTests):
- @pytest.mark.xfail(reason="groupby not working")
+ @pytest.mark.xfail(reason="groupby not working", strict=True)
def test_groupby_extension_no_sort(self, data_for_grouping):
super(TestGroupby, self).test_groupby_extension_no_sort(
data_for_grouping)
- @pytest.mark.xfail(reason="groupby not working")
- @pytest.mark.parametrize('as_index', [True, False])
+ @pytest.mark.parametrize('as_index', [
+ pytest.param(True,
+ marks=pytest.mark.xfail(reason="groupby not working",
+ strict=True)),
+ False
+ ])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super(TestGroupby, self).test_groupby_extension_agg(
as_index, data_for_grouping)
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 520c303f1990b..b9cc3c431528f 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -142,6 +142,7 @@ def test_custom_asserts(self):
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
+ # TODO: Should this be pytest.mark.skip?
@pytest.mark.xfail(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
@@ -157,10 +158,12 @@ class TestGetitem(BaseJSON, base.BaseGetitemTests):
class TestMissing(BaseJSON, base.BaseMissingTests):
+ # TODO: Should this be pytest.mark.skip?
@pytest.mark.xfail(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
+ # TODO: Should this be pytest.mark.skip?
@pytest.mark.xfail(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@@ -212,7 +215,7 @@ def test_combine_add(self, data_repeated):
class TestCasting(BaseJSON, base.BaseCastingTests):
-
+ # TODO: Should this be pytest.mark.skip?
@pytest.mark.xfail(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index fb381a5640519..3d03a70553d2d 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -201,7 +201,8 @@ def test_df_div_zero_series_does_not_commute(self):
class TestFrameArithmetic(object):
- @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano')
+ @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
+ strict=True)
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
diff --git a/pandas/tests/frame/test_duplicates.py b/pandas/tests/frame/test_duplicates.py
index 289170527dea7..940692ec5b46a 100644
--- a/pandas/tests/frame/test_duplicates.py
+++ b/pandas/tests/frame/test_duplicates.py
@@ -55,7 +55,8 @@ def test_duplicated_keep(keep, expected):
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(reason="GH21720; nan/None falsely considered equal")
+@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal",
+ strict=True)
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True])),
('last', Series([True, True, False, False, False])),
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 34489051efc18..606539a564323 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -487,7 +487,17 @@ def test_agg_structs_series(structure, expected):
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.")
+@pytest.mark.parametrize('observed', [
+ True,
+ pytest.param(False,
+ marks=pytest.mark.xfail(reason="GH#18869: agg func not "
+ "called on empty groups.",
+ strict=True)),
+ pytest.param(None,
+ marks=pytest.mark.xfail(reason="GH#18869: agg func not "
+ "called on empty groups.",
+ strict=True))
+])
def test_agg_category_nansum(observed):
categories = ['a', 'b', 'c']
df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 07eef2d87feb3..7c90d359a4054 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -58,9 +58,10 @@ def test_apply_trivial():
tm.assert_frame_equal(result, expected)
-@pytest.mark.xfail(reason=("GH 20066; function passed into apply "
- "returns a DataFrame with the same index "
- "as the one to create GroupBy object."))
+@pytest.mark.xfail(reason="GH#20066; function passed into apply "
+ "returns a DataFrame with the same index "
+ "as the one to create GroupBy object.",
+ strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index 1e96ac730a0eb..6bbc938c346f7 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -95,7 +95,7 @@ def test_subtype_integer(self, subtype_start, subtype_end):
closed=index.closed)
tm.assert_index_equal(result, expected)
- @pytest.mark.xfail(reason='GH 15832')
+ @pytest.mark.xfail(reason='GH#15832', strict=True)
def test_subtype_integer_errors(self):
# int64 -> uint64 fails with negative values
index = interval_range(-10, 10)
@@ -133,7 +133,7 @@ def test_subtype_integer(self, subtype):
with tm.assert_raises_regex(ValueError, msg):
index.insert(0, np.nan).astype(dtype)
- @pytest.mark.xfail(reason='GH 15832')
+ @pytest.mark.xfail(reason='GH#15832', strict=True)
def test_subtype_integer_errors(self):
# float64 -> uint64 fails with negative values
index = interval_range(-10.0, 10.0)
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index 79fcff965e725..bedacf84f4f9a 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -83,7 +83,7 @@ def test_nulls(idx):
idx.isna()
-@pytest.mark.xfail
+@pytest.mark.xfail(strict=True)
def test_hasnans_isnans(idx):
# GH 11343, added tests for hasnans / isnans
index = idx.copy()
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py
index 1d3c8b94a6490..d7dbb1423a9a3 100644
--- a/pandas/tests/indexes/period/test_arithmetic.py
+++ b/pandas/tests/indexes/period/test_arithmetic.py
@@ -356,7 +356,8 @@ def test_pi_add_sub_td64_array_non_tick_raises(self):
with pytest.raises(period.IncompatibleFrequency):
tdarr - rng
- @pytest.mark.xfail(reason='op with TimedeltaIndex raises, with ndarray OK')
+ @pytest.mark.xfail(reason='op with TimedeltaIndex raises, with ndarray OK',
+ strict=True)
def test_pi_add_sub_td64_array_tick(self):
rng = pd.period_range('1/1/2000', freq='Q', periods=3)
dti = pd.date_range('2016-01-01', periods=3)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 754703dfc4bee..ef3d41126d373 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -491,8 +491,9 @@ def test_constructor_overflow_int64(self):
with tm.assert_raises_regex(OverflowError, msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
- @pytest.mark.xfail(reason="see gh-21311: Index "
- "doesn't enforce dtype argument")
+ @pytest.mark.xfail(reason="see GH#21311: Index "
+ "doesn't enforce dtype argument",
+ strict=True)
def test_constructor_cast(self):
msg = "could not convert string to float"
with tm.assert_raises_regex(ValueError, msg):
@@ -1455,7 +1456,8 @@ def test_slice_float_locs(self):
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
- @pytest.mark.xfail(reason="Assertions were not correct - see GH 20915")
+ @pytest.mark.xfail(reason="Assertions were not correct - see GH#20915",
+ strict=True)
def test_slice_ints_with_floats_raises(self):
# int slicing with floats
# GH 4892, these are all TypeErrors
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 71b2774a92612..01ff038c4dd1c 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -150,7 +150,8 @@ def test_rpow_float(self):
result = 2.0**idx
tm.assert_index_equal(result, expected)
- @pytest.mark.xfail(reason='GH#19252 Series has no __rdivmod__')
+ @pytest.mark.xfail(reason='GH#19252 Series has no __rdivmod__',
+ strict=True)
def test_divmod_series(self):
idx = self.create_index()
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index 5fb356e48289f..ea0b5f5cc0c66 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -274,7 +274,7 @@ def test_to_csv_string_array_ascii(self):
with open(path, 'r') as f:
assert f.read() == expected_ascii
- @pytest.mark.xfail
+ @pytest.mark.xfail(strict=True)
def test_to_csv_string_array_utf8(self):
# GH 10813
str_array = [{'names': ['foo', 'bar']}, {'names': ['baz', 'qux']}]
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index b6483d0e978ba..829953c144caa 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -495,7 +495,10 @@ def test_mi_falsey_name(self):
class TestTableOrientReader(object):
@pytest.mark.parametrize("index_nm", [
- None, "idx", pytest.param("index", marks=pytest.mark.xfail),
+ None,
+ "idx",
+ pytest.param("index",
+ marks=pytest.mark.xfail(strict=True)),
'level_0'])
@pytest.mark.parametrize("vals", [
{'ints': [1, 2, 3, 4]},
@@ -504,7 +507,8 @@ class TestTableOrientReader(object):
{'categoricals': pd.Series(pd.Categorical(['a', 'b', 'c', 'c']))},
{'ordered_cats': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'],
ordered=True))},
- pytest.param({'floats': [1., 2., 3., 4.]}, marks=pytest.mark.xfail),
+ pytest.param({'floats': [1., 2., 3., 4.]},
+ marks=pytest.mark.xfail(strict=True)),
{'floats': [1.1, 2.2, 3.3, 4.4]},
{'bools': [True, False, False, True]}])
def test_read_json_table_orient(self, index_nm, vals, recwarn):
@@ -562,7 +566,9 @@ def test_multiindex(self, index_names):
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("strict_check", [
- pytest.param(True, marks=pytest.mark.xfail), False])
+ pytest.param(True, marks=pytest.mark.xfail(strict=True)),
+ False
+ ])
def test_empty_frame_roundtrip(self, strict_check):
# GH 21287
df = pd.DataFrame([], columns=['a', 'b', 'c'])
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index e51780891534f..fa5a8f6a1900c 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2227,7 +2227,8 @@ def check_called(func):
pytest.param('xlwt',
marks=pytest.mark.xfail(reason='xlwt does not support '
'openpyxl-compatible '
- 'style dicts')),
+ 'style dicts',
+ strict=True)),
'xlsxwriter',
'openpyxl',
])
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 11cbea8ce6331..fefbe8afb59cb 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -216,7 +216,8 @@ def test_options_get_engine(fp, pa):
@pytest.mark.xfail(is_platform_windows() or is_platform_mac(),
- reason="reading pa metadata failing on Windows/mac")
+ reason="reading pa metadata failing on Windows/mac",
+ strict=True)
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
@@ -383,6 +384,7 @@ def test_basic(self, pa, df_full):
check_round_trip(df, pa)
+ # TODO: This doesn't fail on all systems; track down which
@pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)")
def test_basic_subset_columns(self, pa, df_full):
# GH18628
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index db10ea15f6e9c..47a93ba82d77b 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -496,7 +496,8 @@ def test_subplots_timeseries_y_axis(self):
testdata.plot(y="text")
@pytest.mark.xfail(reason='not support for period, categorical, '
- 'datetime_mixed_tz')
+ 'datetime_mixed_tz',
+ strict=True)
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 0473610ea2f8f..e80443954a434 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -212,7 +212,6 @@ def test_parallel_coordinates(self, iris):
with tm.assert_produces_warning(FutureWarning):
parallel_coordinates(df, 'Name', colors=colors)
- @pytest.mark.xfail(reason="unreliable test")
def test_parallel_coordinates_with_sorted_labels(self):
""" For #15908 """
from pandas.plotting import parallel_coordinates
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 7e7e081408534..e3d5880eebd48 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -458,7 +458,8 @@ def test_pivot_with_list_like_values_nans(self, values):
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
- 'with KeyError #19966')
+ 'with KeyError GH#19966',
+ strict=True)
def test_pivot_with_multiindex(self):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
@@ -617,8 +618,9 @@ def test_margins_dtype(self):
tm.assert_frame_equal(expected, result)
- @pytest.mark.xfail(reason='GH 17035 (len of floats is casted back to '
- 'floats)')
+ @pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
+ 'floats)',
+ strict=True)
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
@@ -1102,8 +1104,9 @@ def test_pivot_table_margins_name_with_aggfunc_list(self):
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
- @pytest.mark.xfail(reason='GH 17035 (np.mean of ints is casted back to '
- 'ints)')
+ @pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
+ 'ints)',
+ strict=True)
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
@@ -1117,8 +1120,9 @@ def test_categorical_margins(self, observed):
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
- @pytest.mark.xfail(reason='GH 17035 (np.mean of ints is casted back to '
- 'ints)')
+ @pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
+ 'ints)',
+ strict=True)
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py
index 8fde9a417f3b7..2e3867db65604 100644
--- a/pandas/tests/scalar/period/test_asfreq.py
+++ b/pandas/tests/scalar/period/test_asfreq.py
@@ -32,7 +32,8 @@ def test_asfreq_near_zero_weekly(self):
assert week2.asfreq('D', 'S') <= per2
@pytest.mark.xfail(reason='GH#19643 period_helper asfreq functions fail '
- 'to check for overflows')
+ 'to check for overflows',
+ strict=True)
def test_to_timestamp_out_of_bounds(self):
# GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848')
per = Period('0001-01-01', freq='B')
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index eccd86a888fb9..1e8f13ab3f219 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -1443,7 +1443,8 @@ def test_period_immutable():
per.freq = 2 * freq
-@pytest.mark.xfail(reason='GH#19834 Period parsing error')
+# TODO: This doesn't fail on all systems; track down which
+@pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems")
def test_small_year_parsing():
per1 = Period('0001-01-07', 'D')
assert per1.year == 1
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 69969bd090b9b..09e89115e120e 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -2070,7 +2070,7 @@ def test_value_counts_with_nan(self):
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]",
pytest.param("datetime64[D]",
- marks=pytest.mark.xfail(reason="issue7996"))]
+ marks=pytest.mark.xfail(reason="GH#7996", strict=True))]
)
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index d0e001cbfcd88..42f2d45df2def 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -223,7 +223,8 @@ def test_rank_signature(self):
'int64',
marks=pytest.mark.xfail(
reason="iNaT is equivalent to minimum value of dtype"
- "int64 pending issue #16674")),
+ "int64 pending issue GH#16674",
+ strict=True)),
([NegInfinity(), '1', 'A', 'BA', 'Ba', 'C', Infinity()],
'object')
])
diff --git a/pandas/tests/sparse/frame/test_analytics.py b/pandas/tests/sparse/frame/test_analytics.py
index ccb30502b862e..54e3ddbf2f1cf 100644
--- a/pandas/tests/sparse/frame/test_analytics.py
+++ b/pandas/tests/sparse/frame/test_analytics.py
@@ -4,8 +4,8 @@
from pandas.util import testing as tm
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_quantile():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
@@ -22,8 +22,8 @@ def test_quantile():
tm.assert_sp_series_equal(result, sparse_expected)
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_quantile_multi():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 9cc615e15564f..be5a1710119ee 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -1131,7 +1131,8 @@ def test_as_blocks(self):
tm.assert_frame_equal(df_blocks['float64'], df)
@pytest.mark.xfail(reason='nan column names in _init_dict problematic '
- '(GH 16894)')
+ '(GH#16894)',
+ strict=True)
def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
@@ -1257,8 +1258,8 @@ def test_numpy_func_call(self):
for func in funcs:
getattr(np, func)(self.frame)
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+ @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_quantile(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
@@ -1274,8 +1275,8 @@ def test_quantile(self):
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+ @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_quantile_multi(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
diff --git a/pandas/tests/sparse/frame/test_indexing.py b/pandas/tests/sparse/frame/test_indexing.py
index 1c27d44015c2b..607eb2da6ded0 100644
--- a/pandas/tests/sparse/frame/test_indexing.py
+++ b/pandas/tests/sparse/frame/test_indexing.py
@@ -18,8 +18,8 @@
[np.nan, np.nan]
]
])
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_where_with_numeric_data(data):
# GH 17386
lower_bound = 1.5
@@ -52,8 +52,8 @@ def test_where_with_numeric_data(data):
0.1,
100.0 + 100.0j
])
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_where_with_numeric_data_and_other(data, other):
# GH 17386
lower_bound = 1.5
@@ -70,8 +70,8 @@ def test_where_with_numeric_data_and_other(data, other):
tm.assert_sp_frame_equal(result, sparse_expected)
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_where_with_bool_data():
# GH 17386
data = [[False, False], [True, True], [False, False]]
@@ -94,8 +94,8 @@ def test_where_with_bool_data():
0.1,
100.0 + 100.0j
])
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_where_with_bool_data_and_other(other):
# GH 17386
data = [[False, False], [True, True], [False, False]]
diff --git a/pandas/tests/sparse/series/test_indexing.py b/pandas/tests/sparse/series/test_indexing.py
index de01b065a9fa0..998285d933492 100644
--- a/pandas/tests/sparse/series/test_indexing.py
+++ b/pandas/tests/sparse/series/test_indexing.py
@@ -18,8 +18,8 @@
np.nan, np.nan
]
])
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_where_with_numeric_data(data):
# GH 17386
lower_bound = 1.5
@@ -70,8 +70,8 @@ def test_where_with_numeric_data_and_other(data, other):
tm.assert_sp_series_equal(result, sparse_expected)
-@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)',
+ strict=True)
def test_where_with_bool_data():
# GH 17386
data = [False, False, True, True, False, False]
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 796c637434353..58f2f41f3681c 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -858,10 +858,8 @@ def test_duplicated_with_nas(self):
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
- pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
- 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
- marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
- ),
+ np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
+ 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index b7530da36ed8b..bbc5bd96bad55 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -1235,7 +1235,8 @@ def test_values_consistent(array, expected_type, dtype):
pytest.param(
pd.PeriodIndex(['2017', '2018'], freq='D'),
np.array([17167, 17532]),
- marks=pytest.mark.xfail(reason="PeriodArray Not implemented")
+ marks=pytest.mark.xfail(reason="PeriodArray Not implemented",
+ strict=True)
),
])
def test_ndarray_values(array, expected):
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index cf98cff97669a..70973801d7cda 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -95,7 +95,7 @@ def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
-@pytest.mark.xfail(reason="0.7.0 pending")
+@pytest.mark.xfail(reason="0.7.0 pending", strict=True)
@tm.network
def test_pandas_datareader():
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 397da2fa40cd8..fc3b13a37fcdb 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -621,8 +621,9 @@ def test_numpy_compat(self, method):
@pytest.mark.parametrize(
'expander',
[1, pytest.param('ls', marks=pytest.mark.xfail(
- reason='GH 16425 expanding with '
- 'offset not supported'))])
+ reason='GH#16425 expanding with '
+ 'offset not supported',
+ strict=True))])
def test_empty_df_expanding(self, expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
| This leaves untouched xfails with reasons like "flaky on 3.7" and a few where the test itself is just a `pass`.
In the process a handful of previously xfailed cases are no longer xfailed. | https://api.github.com/repos/pandas-dev/pandas/pulls/22139 | 2018-07-31T01:18:06Z | 2018-08-01T10:25:13Z | 2018-08-01T10:25:13Z | 2018-08-01T15:43:24Z |
DOC: Update link and description of the Spyder IDE in Ecosystem docs | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 82ca3821fc2ed..1014982fea21a 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -14,7 +14,7 @@ development to remain focused around it's original requirements.
This is an inexhaustive list of projects that build on pandas in order to provide
tools in the PyData space. For a list of projects that depend on pandas,
-see the
+see the
`libraries.io usage page for pandas <https://libraries.io/pypi/pandas/usage>`_
or `search pypi for pandas <https://pypi.org/search/?q=pandas>`_.
@@ -44,7 +44,7 @@ ML pipeline.
`Featuretools <https://github.com/featuretools/featuretools/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community.
+Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community.
.. _ecosystem.visualization:
@@ -149,13 +149,30 @@ for pandas ``display.`` settings.
qgrid is "an interactive grid for sorting and filtering
DataFrames in IPython Notebook" built with SlickGrid.
-`Spyder <https://github.com/spyder-ide/spyder/>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`Spyder <https://www.spyder-ide.org/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Spyder is a cross-platform PyQt-based IDE combining the editing, analysis,
+debugging and profiling functionality of a software development tool with the
+data exploration, interactive execution, deep inspection and rich visualization
+capabilities of a scientific environment like MATLAB or Rstudio.
+
+Its `Variable Explorer <https://docs.spyder-ide.org/variableexplorer.html>`__
+allows users to view, manipulate and edit pandas ``Index``, ``Series``,
+and ``DataFrame`` objects like a "spreadsheet", including copying and modifying
+values, sorting, displaying a "heatmap", converting data types and more.
+Pandas objects can also be renamed, duplicated, new columns added,
+copyed/pasted to/from the clipboard (as TSV), and saved/loaded to/from a file.
+Spyder can also import data from a variety of plain text and binary files
+or the clipboard into a new pandas DataFrame via a sophisticated import wizard.
+
+Most pandas classes, methods and data attributes can be autocompleted in
+Spyder's `Editor <https://docs.spyder-ide.org/editor.html>`__ and
+`IPython Console <https://docs.spyder-ide.org/ipythonconsole.html>`__,
+and Spyder's `Help pane<https://docs.spyder-ide.org/help.html>`__ can retrieve
+and render Numpydoc documentation on pandas objects in rich text with Sphinx
+both automatically and on-demand.
-Spyder is a cross-platform Qt-based open-source Python IDE with
-editing, testing, debugging, and introspection features.
-Spyder can now introspect and display Pandas DataFrames and show
-both "column wise min/max and global min/max coloring."
.. _ecosystem.api:
@@ -205,12 +222,12 @@ This package requires valid credentials for this API (non free).
pandaSDMX is a library to retrieve and acquire statistical data
and metadata disseminated in
`SDMX <http://www.sdmx.org>`_ 2.1, an ISO-standard
-widely used by institutions such as statistics offices, central banks,
-and international organisations. pandaSDMX can expose datasets and related
+widely used by institutions such as statistics offices, central banks,
+and international organisations. pandaSDMX can expose datasets and related
structural metadata including data flows, code-lists,
and data structure definitions as pandas Series
or MultiIndexed DataFrames.
-
+
`fredapi <https://github.com/mortada/fredapi>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fredapi is a Python interface to the `Federal Reserve Economic Data (FRED) <http://research.stlouisfed.org/fred2/>`__
| Over the past few years, [Spyder](https://docs.spyder-ide.org/), a PyQt-based open source Python IDE focused on data science (comparable to MATLAB and Rstudio, but developed by community volunteers instead of a for-profit company), has incorporated a deep swath of ``pandas``-specific features and integration. We've also set up a new, modern website for the project, and a new documentation site as well explaining these, which should be linked to instead of the current one.
Accordingly, this PR does a few simple things, split into separate commits to make it easy to modify or revert any of the changes if undesired:
* It updates the Spyder URL to point to our new main website, in HTTPS
* It uses a more up to date one-liner description of Spyder itself, and principally focuses on a greatly expanded list of ways Spyder directly interacts with pandas and offers tools for working with Series and DataFrame objects.
* As part of this, it embeds links to our new modern, online documentation that describes how these features work, for users to easily reference
* It also cleans up a few stray trailing spaces present in the Ecosystem docs (I can revert this if undesired)
I included a fair amount of detail in the different distinct ways Spyder interacts with pandas objects and the specific features it offers when working with such, but if you think its too long I can cut down the central paragraph accordingly; just let me know and I'll do it promptly.
Thanks! | https://api.github.com/repos/pandas-dev/pandas/pulls/22136 | 2018-07-30T20:17:59Z | 2018-09-05T12:09:14Z | 2018-09-05T12:09:14Z | 2018-09-05T17:17:37Z |
DOC: read_html typo for displayed_only arg | diff --git a/pandas/io/html.py b/pandas/io/html.py
index 45fe3b017e4f6..cca27db00f48d 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -1029,7 +1029,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None,
.. versionadded:: 0.19.0
- display_only : bool, default True
+ displayed_only : bool, default True
Whether elements with "display: none" should be parsed
.. versionadded:: 0.23.0
| https://api.github.com/repos/pandas-dev/pandas/pulls/22134 | 2018-07-30T18:44:03Z | 2018-07-30T19:08:22Z | 2018-07-30T19:08:22Z | 2018-07-30T19:12:22Z | |
Add timetz attribute to DatetimeIndex | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 9056b1f47323a..551b3eff10fa0 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -545,6 +545,7 @@ These can be accessed like ``Series.dt.<property>``.
Series.dt.date
Series.dt.time
+ Series.dt.timetz
Series.dt.year
Series.dt.month
Series.dt.day
@@ -1739,6 +1740,7 @@ Time/Date Components
DatetimeIndex.nanosecond
DatetimeIndex.date
DatetimeIndex.time
+ DatetimeIndex.timetz
DatetimeIndex.dayofyear
DatetimeIndex.weekofyear
DatetimeIndex.week
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 9e01296d9c9c7..b7771436f8e55 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -724,6 +724,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
nanosecond,"The nanoseconds of the datetime"
date,"Returns datetime.date (does not contain timezone information)"
time,"Returns datetime.time (does not contain timezone information)"
+ timetz,"Returns datetime.time as local time with timezone information"
dayofyear,"The ordinal day of year"
weekofyear,"The week ordinal of the year"
week,"The week ordinal of the year"
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 5c15c7b6a742f..9426a0b119be0 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -181,6 +181,7 @@ Other Enhancements
The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`).
- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`)
- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`)
+- :class:`DatetimeIndex` gained :attr:`DatetimeIndex.timetz` attribute. Returns local time with timezone information. (:issue:`21358`)
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 12089f1617f5d..04e039a9fc2c9 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -72,7 +72,7 @@ cdef inline object create_time_from_ts(
int64_t value, npy_datetimestruct dts,
object tz, object freq):
""" convenience routine to construct a datetime.time from its parts """
- return time(dts.hour, dts.min, dts.sec, dts.us)
+ return time(dts.hour, dts.min, dts.sec, dts.us, tz)
def ints_to_pydatetime(int64_t[:] arr, tz=None, freq=None, box="datetime"):
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index ee0677f760705..1dd34cdf73ab5 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -856,6 +856,14 @@ def time(self):
return tslib.ints_to_pydatetime(timestamps, box="time")
+ @property
+ def timetz(self):
+ """
+ Returns numpy array of datetime.time also containing timezone
+ information. The time part of the Timestamps.
+ """
+ return tslib.ints_to_pydatetime(self.asi8, self.tz, box="time")
+
@property
def date(self):
"""
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 3ee91a106f36b..019aad4941d26 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -170,6 +170,7 @@ class DatetimeIndex(DatetimeArrayMixin, DatelikeOps, TimelikeOps,
nanosecond
date
time
+ timetz
dayofyear
weekofyear
week
@@ -260,7 +261,7 @@ def _add_comparison_methods(cls):
'dayofyear', 'quarter', 'days_in_month',
'daysinmonth', 'microsecond',
'nanosecond']
- _other_ops = ['date', 'time']
+ _other_ops = ['date', 'time', 'timetz']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods = ['to_period', 'tz_localize',
'tz_convert',
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index b83645414c8ff..95531b2d7a7ae 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -731,6 +731,20 @@ def test_time_accessor(self, dtype):
tm.assert_numpy_array_equal(result, expected)
+ def test_timetz_accessor(self, tz_naive_fixture):
+ # GH21358
+ if tz_naive_fixture is not None:
+ tz = dateutil.tz.gettz(tz_naive_fixture)
+ else:
+ tz = None
+
+ expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT])
+
+ index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], tz=tz)
+ result = index.timetz
+
+ tm.assert_numpy_array_equal(result, expected)
+
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index cd0379e7af1a3..8cebfafeae82a 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -2,7 +2,7 @@
"""
Tests for Timestamp timezone-related methods
"""
-from datetime import date, timedelta
+from datetime import datetime, date, timedelta
from distutils.version import LooseVersion
import pytest
@@ -290,3 +290,20 @@ def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz):
expected = Timestamp('3/11/2012 05:00', tz=tz)
assert result == expected
+
+ def test_timestamp_timetz_equivalent_with_datetime_tz(self,
+ tz_naive_fixture):
+ # GH21358
+ if tz_naive_fixture is not None:
+ tz = dateutil.tz.gettz(tz_naive_fixture)
+ else:
+ tz = None
+
+ stamp = Timestamp('2018-06-04 10:20:30', tz=tz)
+ _datetime = datetime(2018, 6, 4, hour=10,
+ minute=20, second=30, tzinfo=tz)
+
+ result = stamp.timetz()
+ expected = _datetime.timetz()
+
+ assert result == expected
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 7a02ce3a1fb2e..06eb525bbac56 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -5,7 +5,7 @@
import calendar
import pytest
-from datetime import datetime, date
+from datetime import datetime, time, date
import numpy as np
import pandas as pd
@@ -16,6 +16,8 @@
PeriodIndex, DatetimeIndex, TimedeltaIndex)
import pandas.core.common as com
+import dateutil
+
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
@@ -459,3 +461,18 @@ def test_datetime_understood(self):
expected = pd.Series(pd.to_datetime([
'2011-12-26', '2011-12-27', '2011-12-28']))
tm.assert_series_equal(result, expected)
+
+ def test_dt_timetz_accessor(self, tz_naive_fixture):
+ # GH21358
+ if tz_naive_fixture is not None:
+ tz = dateutil.tz.gettz(tz_naive_fixture)
+ else:
+ tz = None
+
+ dtindex = pd.DatetimeIndex(['2014-04-04 23:56', '2014-07-18 21:24',
+ '2015-11-22 22:14'], tz=tz)
+ s = Series(dtindex)
+ expected = Series([time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz),
+ time(22, 14, tzinfo=tz)])
+ result = s.dt.timetz
+ tm.assert_series_equal(result, expected)
| - [x] closes #21358
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I only added timetz to DatetimeIndex because there was already a timetz method in Timestamp that did essentially the same thing. If it is still worth adding to Timestamp though, I can certainly do that.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22132 | 2018-07-30T17:06:20Z | 2018-08-09T11:03:28Z | 2018-08-09T11:03:28Z | 2018-08-19T23:39:17Z |
BUG: Fix get dummies unicode error | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 8a92db4c66fb5..b7692f51c4ddf 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -660,6 +660,7 @@ Reshaping
- Bug in :meth:`Series.mask` and :meth:`DataFrame.mask` with ``list`` conditionals (:issue:`21891`)
- Bug in :meth:`DataFrame.replace` raises RecursionError when converting OutOfBounds ``datetime64[ns, tz]`` (:issue:`20380`)
- :func:`pandas.core.groupby.GroupBy.rank` now raises a ``ValueError`` when an invalid value is passed for argument ``na_option`` (:issue:`22124`)
+- Bug in :func:`get_dummies` with Unicode attributes in Python 2 (:issue:`22084`)
-
Build Changes
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index f9ab813855f47..bd5ce4897e9da 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1,6 +1,6 @@
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
-from pandas.compat import range, text_type, zip
+from pandas.compat import range, text_type, zip, u, PY2
from pandas import compat
from functools import partial
import itertools
@@ -923,13 +923,23 @@ def get_empty_Frame(data, sparse):
number_of_cols = len(levels)
- if prefix is not None:
- dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type)
- else '{prefix}{sep}{level}' for v in levels]
- dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v)
- for dummy_str, v in zip(dummy_strs, levels)]
- else:
+ if prefix is None:
dummy_cols = levels
+ else:
+
+ # PY2 embedded unicode, gh-22084
+ def _make_col_name(prefix, prefix_sep, level):
+ fstr = '{prefix}{prefix_sep}{level}'
+ if PY2 and (isinstance(prefix, text_type) or
+ isinstance(prefix_sep, text_type) or
+ isinstance(level, text_type)):
+ fstr = u(fstr)
+ return fstr.format(prefix=prefix,
+ prefix_sep=prefix_sep,
+ level=level)
+
+ dummy_cols = [_make_col_name(prefix, prefix_sep, level)
+ for level in levels]
if isinstance(data, Series):
index = data.index
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index 295801f3e8def..3f4ccd7693a8f 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -302,6 +302,24 @@ def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
expected.sort_index(axis=1)
assert_frame_equal(result, expected)
+ @pytest.mark.parametrize('get_dummies_kwargs,expected', [
+ ({'data': pd.DataFrame(({u'ä': ['a']}))},
+ pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
+
+ ({'data': pd.DataFrame({'x': [u'ä']})},
+ pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
+
+ ({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
+ pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
+
+ ({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
+ pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
+ def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
+ # GH22084 pd.get_dummies incorrectly encodes unicode characters
+ # in dataframe column names
+ result = get_dummies(**get_dummies_kwargs)
+ assert_frame_equal(result, expected)
+
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
| - [x] closes #22084
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/22131 | 2018-07-30T15:59:02Z | 2018-08-02T17:21:37Z | 2018-08-02T17:21:37Z | 2018-08-02T17:21:43Z |
Add Japanese version of Pandas_Cheat_Sheet | diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pdf
new file mode 100644
index 0000000000000..746d1b6c980fe
Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pdf differ
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pptx
new file mode 100644
index 0000000000000..f8b98a6f1f8e4
Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pptx differ
| ## Background
We are sponsoring `PyCon` on September in Japan. And we are planning to influence Pandas in Japan. To do so, we are planning to distribute `Pandas Cheet Sheet` with plastic file at `Pycon`. | https://api.github.com/repos/pandas-dev/pandas/pulls/22127 | 2018-07-30T08:44:10Z | 2018-07-30T10:05:24Z | 2018-07-30T10:05:24Z | 2018-07-30T10:05:36Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.