title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
REF: move misplaced pd.concat tests | diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
deleted file mode 100644
index 7eba2b873c4f4..0000000000000
--- a/pandas/tests/frame/test_combine_concat.py
+++ /dev/null
@@ -1,236 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import DataFrame, Index, Series, Timestamp, date_range
-import pandas._testing as tm
-
-
-class TestDataFrameConcat:
- def test_concat_multiple_frames_dtypes(self):
-
- # GH 2759
- A = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
- B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
- results = pd.concat((A, B), axis=1).dtypes
- expected = Series(
- [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
- index=["foo", "bar", 0, 1],
- )
- tm.assert_series_equal(results, expected)
-
- def test_concat_multiple_tzs(self):
- # GH 12467
- # combining datetime tz-aware and naive DataFrames
- ts1 = Timestamp("2015-01-01", tz=None)
- ts2 = Timestamp("2015-01-01", tz="UTC")
- ts3 = Timestamp("2015-01-01", tz="EST")
-
- df1 = DataFrame(dict(time=[ts1]))
- df2 = DataFrame(dict(time=[ts2]))
- df3 = DataFrame(dict(time=[ts3]))
-
- results = pd.concat([df1, df2]).reset_index(drop=True)
- expected = DataFrame(dict(time=[ts1, ts2]), dtype=object)
- tm.assert_frame_equal(results, expected)
-
- results = pd.concat([df1, df3]).reset_index(drop=True)
- expected = DataFrame(dict(time=[ts1, ts3]), dtype=object)
- tm.assert_frame_equal(results, expected)
-
- results = pd.concat([df2, df3]).reset_index(drop=True)
- expected = DataFrame(dict(time=[ts2, ts3]))
- tm.assert_frame_equal(results, expected)
-
- @pytest.mark.parametrize(
- "t1",
- [
- "2015-01-01",
- pytest.param(
- pd.NaT,
- marks=pytest.mark.xfail(
- reason="GH23037 incorrect dtype when concatenating"
- ),
- ),
- ],
- )
- def test_concat_tz_NaT(self, t1):
- # GH 22796
- # Concating tz-aware multicolumn DataFrames
- ts1 = Timestamp(t1, tz="UTC")
- ts2 = Timestamp("2015-01-01", tz="UTC")
- ts3 = Timestamp("2015-01-01", tz="UTC")
-
- df1 = DataFrame([[ts1, ts2]])
- df2 = DataFrame([[ts3]])
-
- result = pd.concat([df1, df2])
- expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
-
- tm.assert_frame_equal(result, expected)
-
- def test_concat_tz_not_aligned(self):
- # GH 22796
- ts = pd.to_datetime([1, 2]).tz_localize("UTC")
- a = pd.DataFrame({"A": ts})
- b = pd.DataFrame({"A": ts, "B": ts})
- result = pd.concat([a, b], sort=True, ignore_index=True)
- expected = pd.DataFrame(
- {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
- )
- tm.assert_frame_equal(result, expected)
-
- def test_concat_tuple_keys(self):
- # GH 14438
- df1 = pd.DataFrame(np.ones((2, 2)), columns=list("AB"))
- df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
- results = pd.concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
- expected = pd.DataFrame(
- {
- "A": {
- ("bee", "bah", 0): 1.0,
- ("bee", "bah", 1): 1.0,
- ("bee", "boo", 0): 2.0,
- ("bee", "boo", 1): 2.0,
- ("bee", "boo", 2): 2.0,
- },
- "B": {
- ("bee", "bah", 0): 1.0,
- ("bee", "bah", 1): 1.0,
- ("bee", "boo", 0): 2.0,
- ("bee", "boo", 1): 2.0,
- ("bee", "boo", 2): 2.0,
- },
- }
- )
- tm.assert_frame_equal(results, expected)
-
- def test_concat_named_keys(self):
- # GH 14252
- df = pd.DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
- index = Index(["a", "b"], name="baz")
- concatted_named_from_keys = pd.concat([df, df], keys=index)
- expected_named = pd.DataFrame(
- {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
- index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
- )
- tm.assert_frame_equal(concatted_named_from_keys, expected_named)
-
- index_no_name = Index(["a", "b"], name=None)
- concatted_named_from_names = pd.concat(
- [df, df], keys=index_no_name, names=["baz"]
- )
- tm.assert_frame_equal(concatted_named_from_names, expected_named)
-
- concatted_unnamed = pd.concat([df, df], keys=index_no_name)
- expected_unnamed = pd.DataFrame(
- {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
- index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
- )
- tm.assert_frame_equal(concatted_unnamed, expected_unnamed)
-
- def test_concat_axis_parameter(self):
- # GH 14369
- df1 = pd.DataFrame({"A": [0.1, 0.2]}, index=range(2))
- df2 = pd.DataFrame({"A": [0.3, 0.4]}, index=range(2))
-
- # Index/row/0 DataFrame
- expected_index = pd.DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
-
- concatted_index = pd.concat([df1, df2], axis="index")
- tm.assert_frame_equal(concatted_index, expected_index)
-
- concatted_row = pd.concat([df1, df2], axis="rows")
- tm.assert_frame_equal(concatted_row, expected_index)
-
- concatted_0 = pd.concat([df1, df2], axis=0)
- tm.assert_frame_equal(concatted_0, expected_index)
-
- # Columns/1 DataFrame
- expected_columns = pd.DataFrame(
- [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
- )
-
- concatted_columns = pd.concat([df1, df2], axis="columns")
- tm.assert_frame_equal(concatted_columns, expected_columns)
-
- concatted_1 = pd.concat([df1, df2], axis=1)
- tm.assert_frame_equal(concatted_1, expected_columns)
-
- series1 = pd.Series([0.1, 0.2])
- series2 = pd.Series([0.3, 0.4])
-
- # Index/row/0 Series
- expected_index_series = pd.Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
-
- concatted_index_series = pd.concat([series1, series2], axis="index")
- tm.assert_series_equal(concatted_index_series, expected_index_series)
-
- concatted_row_series = pd.concat([series1, series2], axis="rows")
- tm.assert_series_equal(concatted_row_series, expected_index_series)
-
- concatted_0_series = pd.concat([series1, series2], axis=0)
- tm.assert_series_equal(concatted_0_series, expected_index_series)
-
- # Columns/1 Series
- expected_columns_series = pd.DataFrame(
- [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
- )
-
- concatted_columns_series = pd.concat([series1, series2], axis="columns")
- tm.assert_frame_equal(concatted_columns_series, expected_columns_series)
-
- concatted_1_series = pd.concat([series1, series2], axis=1)
- tm.assert_frame_equal(concatted_1_series, expected_columns_series)
-
- # Testing ValueError
- with pytest.raises(ValueError, match="No axis named"):
- pd.concat([series1, series2], axis="something")
-
- def test_concat_numerical_names(self):
- # #15262 # #12223
- df = pd.DataFrame(
- {"col": range(9)},
- dtype="int32",
- index=(
- pd.MultiIndex.from_product(
- [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
- )
- ),
- )
- result = pd.concat((df.iloc[:2, :], df.iloc[-2:, :]))
- expected = pd.DataFrame(
- {"col": [0, 1, 7, 8]},
- dtype="int32",
- index=pd.MultiIndex.from_tuples(
- [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
- ),
- )
- tm.assert_frame_equal(result, expected)
-
- def test_concat_astype_dup_col(self):
- # gh 23049
- df = pd.DataFrame([{"a": "b"}])
- df = pd.concat([df, df], axis=1)
-
- result = df.astype("category")
- expected = pd.DataFrame(
- np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
- ).astype("category")
- tm.assert_frame_equal(result, expected)
-
- def test_concat_datetime_datetime64_frame(self):
- # #2624
- rows = []
- rows.append([datetime(2010, 1, 1), 1])
- rows.append([datetime(2010, 1, 2), "hi"])
-
- df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
-
- ind = date_range(start="2000/1/1", freq="D", periods=10)
- df1 = DataFrame({"date": ind, "test": range(10)})
-
- # it works!
- pd.concat([df1, df2_obj])
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index f0eb745041a66..48500531aa351 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -2925,3 +2925,349 @@ def test_concat_preserves_extension_int64_dtype():
result = pd.concat([df_a, df_b], ignore_index=True)
expected = pd.DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
+
+
+class TestSeriesConcat:
+ @pytest.mark.parametrize(
+ "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
+ )
+ def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
+ dtype = np.dtype(dtype)
+
+ result = pd.concat([Series(dtype=dtype)])
+ assert result.dtype == dtype
+
+ result = pd.concat([Series(dtype=dtype), Series(dtype=dtype)])
+ assert result.dtype == dtype
+
+ def test_concat_empty_series_dtypes_roundtrips(self):
+
+ # round-tripping with self & like self
+ dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
+
+ def int_result_type(dtype, dtype2):
+ typs = {dtype.kind, dtype2.kind}
+ if not len(typs - {"i", "u", "b"}) and (
+ dtype.kind == "i" or dtype2.kind == "i"
+ ):
+ return "i"
+ elif not len(typs - {"u", "b"}) and (
+ dtype.kind == "u" or dtype2.kind == "u"
+ ):
+ return "u"
+ return None
+
+ def float_result_type(dtype, dtype2):
+ typs = {dtype.kind, dtype2.kind}
+ if not len(typs - {"f", "i", "u"}) and (
+ dtype.kind == "f" or dtype2.kind == "f"
+ ):
+ return "f"
+ return None
+
+ def get_result_type(dtype, dtype2):
+ result = float_result_type(dtype, dtype2)
+ if result is not None:
+ return result
+ result = int_result_type(dtype, dtype2)
+ if result is not None:
+ return result
+ return "O"
+
+ for dtype in dtypes:
+ for dtype2 in dtypes:
+ if dtype == dtype2:
+ continue
+
+ expected = get_result_type(dtype, dtype2)
+ result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
+ assert result.kind == expected
+
+ @pytest.mark.parametrize(
+ "left,right,expected",
+ [
+ # booleans
+ (np.bool_, np.int32, np.int32),
+ (np.bool_, np.float32, np.object_),
+ # datetime-like
+ ("m8[ns]", np.bool_, np.object_),
+ ("m8[ns]", np.int64, np.object_),
+ ("M8[ns]", np.bool_, np.object_),
+ ("M8[ns]", np.int64, np.object_),
+ # categorical
+ ("category", "category", "category"),
+ ("category", "object", "object"),
+ ],
+ )
+ def test_concat_empty_series_dtypes(self, left, right, expected):
+ result = pd.concat([Series(dtype=left), Series(dtype=right)])
+ assert result.dtype == expected
+
+ def test_concat_empty_series_dtypes_triple(self):
+
+ assert (
+ pd.concat(
+ [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
+ ).dtype
+ == np.object_
+ )
+
+ def test_concat_empty_series_dtype_category_with_array(self):
+ # GH#18515
+ assert (
+ pd.concat(
+ [Series(np.array([]), dtype="category"), Series(dtype="float64")]
+ ).dtype
+ == "float64"
+ )
+
+ def test_concat_empty_series_dtypes_sparse(self):
+ result = pd.concat(
+ [
+ Series(dtype="float64").astype("Sparse"),
+ Series(dtype="float64").astype("Sparse"),
+ ]
+ )
+ assert result.dtype == "Sparse[float64]"
+
+ result = pd.concat(
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
+ )
+ # TODO: release-note: concat sparse dtype
+ expected = pd.SparseDtype(np.float64)
+ assert result.dtype == expected
+
+ result = pd.concat(
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
+ )
+ # TODO: release-note: concat sparse dtype
+ expected = pd.SparseDtype("object")
+ assert result.dtype == expected
+
+
+class TestDataFrameConcat:
+ def test_concat_multiple_frames_dtypes(self):
+
+ # GH#2759
+ A = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
+ B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
+ results = pd.concat((A, B), axis=1).dtypes
+ expected = Series(
+ [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
+ index=["foo", "bar", 0, 1],
+ )
+ tm.assert_series_equal(results, expected)
+
+ def test_concat_multiple_tzs(self):
+ # GH#12467
+ # combining datetime tz-aware and naive DataFrames
+ ts1 = Timestamp("2015-01-01", tz=None)
+ ts2 = Timestamp("2015-01-01", tz="UTC")
+ ts3 = Timestamp("2015-01-01", tz="EST")
+
+ df1 = DataFrame(dict(time=[ts1]))
+ df2 = DataFrame(dict(time=[ts2]))
+ df3 = DataFrame(dict(time=[ts3]))
+
+ results = pd.concat([df1, df2]).reset_index(drop=True)
+ expected = DataFrame(dict(time=[ts1, ts2]), dtype=object)
+ tm.assert_frame_equal(results, expected)
+
+ results = pd.concat([df1, df3]).reset_index(drop=True)
+ expected = DataFrame(dict(time=[ts1, ts3]), dtype=object)
+ tm.assert_frame_equal(results, expected)
+
+ results = pd.concat([df2, df3]).reset_index(drop=True)
+ expected = DataFrame(dict(time=[ts2, ts3]))
+ tm.assert_frame_equal(results, expected)
+
+ @pytest.mark.parametrize(
+ "t1",
+ [
+ "2015-01-01",
+ pytest.param(
+ pd.NaT,
+ marks=pytest.mark.xfail(
+ reason="GH23037 incorrect dtype when concatenating"
+ ),
+ ),
+ ],
+ )
+ def test_concat_tz_NaT(self, t1):
+ # GH#22796
+ # Concating tz-aware multicolumn DataFrames
+ ts1 = Timestamp(t1, tz="UTC")
+ ts2 = Timestamp("2015-01-01", tz="UTC")
+ ts3 = Timestamp("2015-01-01", tz="UTC")
+
+ df1 = DataFrame([[ts1, ts2]])
+ df2 = DataFrame([[ts3]])
+
+ result = pd.concat([df1, df2])
+ expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_concat_tz_not_aligned(self):
+ # GH#22796
+ ts = pd.to_datetime([1, 2]).tz_localize("UTC")
+ a = pd.DataFrame({"A": ts})
+ b = pd.DataFrame({"A": ts, "B": ts})
+ result = pd.concat([a, b], sort=True, ignore_index=True)
+ expected = pd.DataFrame(
+ {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_concat_tuple_keys(self):
+ # GH#14438
+ df1 = pd.DataFrame(np.ones((2, 2)), columns=list("AB"))
+ df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
+ results = pd.concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
+ expected = pd.DataFrame(
+ {
+ "A": {
+ ("bee", "bah", 0): 1.0,
+ ("bee", "bah", 1): 1.0,
+ ("bee", "boo", 0): 2.0,
+ ("bee", "boo", 1): 2.0,
+ ("bee", "boo", 2): 2.0,
+ },
+ "B": {
+ ("bee", "bah", 0): 1.0,
+ ("bee", "bah", 1): 1.0,
+ ("bee", "boo", 0): 2.0,
+ ("bee", "boo", 1): 2.0,
+ ("bee", "boo", 2): 2.0,
+ },
+ }
+ )
+ tm.assert_frame_equal(results, expected)
+
+ def test_concat_named_keys(self):
+ # GH#14252
+ df = pd.DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
+ index = Index(["a", "b"], name="baz")
+ concatted_named_from_keys = pd.concat([df, df], keys=index)
+ expected_named = pd.DataFrame(
+ {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
+ index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
+ )
+ tm.assert_frame_equal(concatted_named_from_keys, expected_named)
+
+ index_no_name = Index(["a", "b"], name=None)
+ concatted_named_from_names = pd.concat(
+ [df, df], keys=index_no_name, names=["baz"]
+ )
+ tm.assert_frame_equal(concatted_named_from_names, expected_named)
+
+ concatted_unnamed = pd.concat([df, df], keys=index_no_name)
+ expected_unnamed = pd.DataFrame(
+ {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
+ index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
+ )
+ tm.assert_frame_equal(concatted_unnamed, expected_unnamed)
+
+ def test_concat_axis_parameter(self):
+ # GH#14369
+ df1 = pd.DataFrame({"A": [0.1, 0.2]}, index=range(2))
+ df2 = pd.DataFrame({"A": [0.3, 0.4]}, index=range(2))
+
+ # Index/row/0 DataFrame
+ expected_index = pd.DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
+
+ concatted_index = pd.concat([df1, df2], axis="index")
+ tm.assert_frame_equal(concatted_index, expected_index)
+
+ concatted_row = pd.concat([df1, df2], axis="rows")
+ tm.assert_frame_equal(concatted_row, expected_index)
+
+ concatted_0 = pd.concat([df1, df2], axis=0)
+ tm.assert_frame_equal(concatted_0, expected_index)
+
+ # Columns/1 DataFrame
+ expected_columns = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
+ )
+
+ concatted_columns = pd.concat([df1, df2], axis="columns")
+ tm.assert_frame_equal(concatted_columns, expected_columns)
+
+ concatted_1 = pd.concat([df1, df2], axis=1)
+ tm.assert_frame_equal(concatted_1, expected_columns)
+
+ series1 = pd.Series([0.1, 0.2])
+ series2 = pd.Series([0.3, 0.4])
+
+ # Index/row/0 Series
+ expected_index_series = pd.Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
+
+ concatted_index_series = pd.concat([series1, series2], axis="index")
+ tm.assert_series_equal(concatted_index_series, expected_index_series)
+
+ concatted_row_series = pd.concat([series1, series2], axis="rows")
+ tm.assert_series_equal(concatted_row_series, expected_index_series)
+
+ concatted_0_series = pd.concat([series1, series2], axis=0)
+ tm.assert_series_equal(concatted_0_series, expected_index_series)
+
+ # Columns/1 Series
+ expected_columns_series = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
+ )
+
+ concatted_columns_series = pd.concat([series1, series2], axis="columns")
+ tm.assert_frame_equal(concatted_columns_series, expected_columns_series)
+
+ concatted_1_series = pd.concat([series1, series2], axis=1)
+ tm.assert_frame_equal(concatted_1_series, expected_columns_series)
+
+ # Testing ValueError
+ with pytest.raises(ValueError, match="No axis named"):
+ pd.concat([series1, series2], axis="something")
+
+ def test_concat_numerical_names(self):
+ # GH#15262, GH#12223
+ df = pd.DataFrame(
+ {"col": range(9)},
+ dtype="int32",
+ index=(
+ pd.MultiIndex.from_product(
+ [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
+ )
+ ),
+ )
+ result = pd.concat((df.iloc[:2, :], df.iloc[-2:, :]))
+ expected = pd.DataFrame(
+ {"col": [0, 1, 7, 8]},
+ dtype="int32",
+ index=pd.MultiIndex.from_tuples(
+ [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_concat_astype_dup_col(self):
+ # GH#23049
+ df = pd.DataFrame([{"a": "b"}])
+ df = pd.concat([df, df], axis=1)
+
+ result = df.astype("category")
+ expected = pd.DataFrame(
+ np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
+ ).astype("category")
+ tm.assert_frame_equal(result, expected)
+
+ def test_concat_datetime_datetime64_frame(self):
+ # GH#2624
+ rows = []
+ rows.append([datetime(2010, 1, 1), 1])
+ rows.append([datetime(2010, 1, 2), "hi"])
+
+ df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
+
+ ind = date_range(start="2000/1/1", freq="D", periods=10)
+ df1 = DataFrame({"date": ind, "test": range(10)})
+
+ # it works!
+ pd.concat([df1, df2_obj])
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
deleted file mode 100644
index 95eba6ccc4df8..0000000000000
--- a/pandas/tests/series/test_combine_concat.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import Series
-
-
-class TestSeriesConcat:
- @pytest.mark.parametrize(
- "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
- )
- def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
- dtype = np.dtype(dtype)
-
- result = pd.concat([Series(dtype=dtype)])
- assert result.dtype == dtype
-
- result = pd.concat([Series(dtype=dtype), Series(dtype=dtype)])
- assert result.dtype == dtype
-
- def test_concat_empty_series_dtypes_roundtrips(self):
-
- # round-tripping with self & like self
- dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
-
- def int_result_type(dtype, dtype2):
- typs = {dtype.kind, dtype2.kind}
- if not len(typs - {"i", "u", "b"}) and (
- dtype.kind == "i" or dtype2.kind == "i"
- ):
- return "i"
- elif not len(typs - {"u", "b"}) and (
- dtype.kind == "u" or dtype2.kind == "u"
- ):
- return "u"
- return None
-
- def float_result_type(dtype, dtype2):
- typs = {dtype.kind, dtype2.kind}
- if not len(typs - {"f", "i", "u"}) and (
- dtype.kind == "f" or dtype2.kind == "f"
- ):
- return "f"
- return None
-
- def get_result_type(dtype, dtype2):
- result = float_result_type(dtype, dtype2)
- if result is not None:
- return result
- result = int_result_type(dtype, dtype2)
- if result is not None:
- return result
- return "O"
-
- for dtype in dtypes:
- for dtype2 in dtypes:
- if dtype == dtype2:
- continue
-
- expected = get_result_type(dtype, dtype2)
- result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
- assert result.kind == expected
-
- @pytest.mark.parametrize(
- "left,right,expected",
- [
- # booleans
- (np.bool_, np.int32, np.int32),
- (np.bool_, np.float32, np.object_),
- # datetime-like
- ("m8[ns]", np.bool_, np.object_),
- ("m8[ns]", np.int64, np.object_),
- ("M8[ns]", np.bool_, np.object_),
- ("M8[ns]", np.int64, np.object_),
- # categorical
- ("category", "category", "category"),
- ("category", "object", "object"),
- ],
- )
- def test_concat_empty_series_dtypes(self, left, right, expected):
- result = pd.concat([Series(dtype=left), Series(dtype=right)])
- assert result.dtype == expected
-
- def test_concat_empty_series_dtypes_triple(self):
-
- assert (
- pd.concat(
- [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
- ).dtype
- == np.object_
- )
-
- def test_concat_empty_series_dtype_category_with_array(self):
- # GH 18515
- assert (
- pd.concat(
- [Series(np.array([]), dtype="category"), Series(dtype="float64")]
- ).dtype
- == "float64"
- )
-
- def test_concat_empty_series_dtypes_sparse(self):
- result = pd.concat(
- [
- Series(dtype="float64").astype("Sparse"),
- Series(dtype="float64").astype("Sparse"),
- ]
- )
- assert result.dtype == "Sparse[float64]"
-
- result = pd.concat(
- [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
- )
- # TODO: release-note: concat sparse dtype
- expected = pd.SparseDtype(np.float64)
- assert result.dtype == expected
-
- result = pd.concat(
- [Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
- )
- # TODO: release-note: concat sparse dtype
- expected = pd.SparseDtype("object")
- assert result.dtype == expected
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37232 | 2020-10-18T23:14:25Z | 2020-10-19T12:58:44Z | 2020-10-19T12:58:44Z | 2020-10-19T16:32:24Z |
REF: move get_op_result_name out of ops.__init__ | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index fb3005484b2f1..2b159c607b0a0 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -14,7 +14,7 @@
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_array_like, is_list_like
-from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
@@ -25,7 +25,10 @@
get_array_op,
logical_op,
)
-from pandas.core.ops.common import unpack_zerodim_and_defer # noqa:F401
+from pandas.core.ops.common import ( # noqa:F401
+ get_op_result_name,
+ unpack_zerodim_and_defer,
+)
from pandas.core.ops.docstrings import (
_flex_comp_doc_FRAME,
_op_descriptions,
@@ -76,67 +79,6 @@
COMPARISON_BINOPS: Set[str] = {"eq", "ne", "lt", "gt", "le", "ge"}
-# -----------------------------------------------------------------------------
-# Ops Wrapping Utilities
-
-
-def get_op_result_name(left, right):
- """
- Find the appropriate name to pin to an operation result. This result
- should always be either an Index or a Series.
-
- Parameters
- ----------
- left : {Series, Index}
- right : object
-
- Returns
- -------
- name : object
- Usually a string
- """
- # `left` is always a Series when called from within ops
- if isinstance(right, (ABCSeries, ABCIndexClass)):
- name = _maybe_match_name(left, right)
- else:
- name = left.name
- return name
-
-
-def _maybe_match_name(a, b):
- """
- Try to find a name to attach to the result of an operation between
- a and b. If only one of these has a `name` attribute, return that
- name. Otherwise return a consensus name if they match of None if
- they have different names.
-
- Parameters
- ----------
- a : object
- b : object
-
- Returns
- -------
- name : str or None
-
- See Also
- --------
- pandas.core.common.consensus_name_attr
- """
- a_has = hasattr(a, "name")
- b_has = hasattr(b, "name")
- if a_has and b_has:
- if a.name == b.name:
- return a.name
- else:
- # TODO: what if they both have np.nan for their names?
- return None
- elif a_has:
- return a.name
- elif b_has:
- return b.name
- return None
-
# -----------------------------------------------------------------------------
# Masking NA values and fallbacks for operations numpy does not support
diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py
index 515a0a5198d74..a6bcab44e5519 100644
--- a/pandas/core/ops/common.py
+++ b/pandas/core/ops/common.py
@@ -65,3 +65,60 @@ def new_method(self, other):
return method(self, other)
return new_method
+
+
+def get_op_result_name(left, right):
+ """
+ Find the appropriate name to pin to an operation result. This result
+ should always be either an Index or a Series.
+
+ Parameters
+ ----------
+ left : {Series, Index}
+ right : object
+
+ Returns
+ -------
+ name : object
+ Usually a string
+ """
+ if isinstance(right, (ABCSeries, ABCIndexClass)):
+ name = _maybe_match_name(left, right)
+ else:
+ name = left.name
+ return name
+
+
+def _maybe_match_name(a, b):
+ """
+ Try to find a name to attach to the result of an operation between
+ a and b. If only one of these has a `name` attribute, return that
+ name. Otherwise return a consensus name if they match of None if
+ they have different names.
+
+ Parameters
+ ----------
+ a : object
+ b : object
+
+ Returns
+ -------
+ name : str or None
+
+ See Also
+ --------
+ pandas.core.common.consensus_name_attr
+ """
+ a_has = hasattr(a, "name")
+ b_has = hasattr(b, "name")
+ if a_has and b_has:
+ if a.name == b.name:
+ return a.name
+ else:
+ # TODO: what if they both have np.nan for their names?
+ return None
+ elif a_has:
+ return a.name
+ elif b_has:
+ return b.name
+ return None
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 17d7527a2b687..366a1970f6f64 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -106,7 +106,7 @@ def test_random_state():
],
)
def test_maybe_match_name(left, right, expected):
- assert ops._maybe_match_name(left, right) == expected
+ assert ops.common._maybe_match_name(left, right) == expected
def test_dict_compat():
| Slowly chipping away at the long-time goal of not having a bunch of code in `ops.__init__` | https://api.github.com/repos/pandas-dev/pandas/pulls/37231 | 2020-10-18T22:53:01Z | 2020-10-19T23:42:29Z | 2020-10-19T23:42:29Z | 2020-10-19T23:43:56Z |
TST: collect unary tests | diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 28c1e2a24bc4d..fb7a6e586c460 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -1,131 +1,13 @@
-from decimal import Decimal
import operator
import re
import numpy as np
import pytest
-import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
-class TestDataFrameUnaryOperators:
- # __pos__, __neg__, __inv__
-
- @pytest.mark.parametrize(
- "df,expected",
- [
- (pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})),
- (pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})),
- (
- pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
- pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}),
- ),
- ],
- )
- def test_neg_numeric(self, df, expected):
- tm.assert_frame_equal(-df, expected)
- tm.assert_series_equal(-df["a"], expected["a"])
-
- @pytest.mark.parametrize(
- "df, expected",
- [
- (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
- ([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),
- ],
- )
- def test_neg_object(self, df, expected):
- # GH#21380
- df = pd.DataFrame({"a": df})
- expected = pd.DataFrame({"a": expected})
- tm.assert_frame_equal(-df, expected)
- tm.assert_series_equal(-df["a"], expected["a"])
-
- @pytest.mark.parametrize(
- "df",
- [
- pd.DataFrame({"a": ["a", "b"]}),
- pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}),
- ],
- )
- def test_neg_raises(self, df):
- msg = (
- "bad operand type for unary -: 'str'|"
- r"Unary negative expects numeric dtype, not datetime64\[ns\]"
- )
- with pytest.raises(TypeError, match=msg):
- (-df)
- with pytest.raises(TypeError, match=msg):
- (-df["a"])
-
- def test_invert(self, float_frame):
- df = float_frame
-
- tm.assert_frame_equal(-(df < 0), ~(df < 0))
-
- def test_invert_mixed(self):
- shape = (10, 5)
- df = pd.concat(
- [
- pd.DataFrame(np.zeros(shape, dtype="bool")),
- pd.DataFrame(np.zeros(shape, dtype=int)),
- ],
- axis=1,
- ignore_index=True,
- )
- result = ~df
- expected = pd.concat(
- [
- pd.DataFrame(np.ones(shape, dtype="bool")),
- pd.DataFrame(-np.ones(shape, dtype=int)),
- ],
- axis=1,
- ignore_index=True,
- )
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize(
- "df",
- [
- pd.DataFrame({"a": [-1, 1]}),
- pd.DataFrame({"a": [False, True]}),
- pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
- ],
- )
- def test_pos_numeric(self, df):
- # GH#16073
- tm.assert_frame_equal(+df, df)
- tm.assert_series_equal(+df["a"], df["a"])
-
- @pytest.mark.parametrize(
- "df",
- [
- # numpy changing behavior in the future
- pytest.param(
- pd.DataFrame({"a": ["a", "b"]}),
- marks=[pytest.mark.filterwarnings("ignore")],
- ),
- pd.DataFrame({"a": np.array([-1, 2], dtype=object)}),
- pd.DataFrame({"a": [Decimal("-1.0"), Decimal("2.0")]}),
- ],
- )
- def test_pos_object(self, df):
- # GH#21380
- tm.assert_frame_equal(+df, df)
- tm.assert_series_equal(+df["a"], df["a"])
-
- @pytest.mark.parametrize(
- "df", [pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})]
- )
- def test_pos_raises(self, df):
- msg = "Unary plus expects .* dtype, not datetime64\\[ns\\]"
- with pytest.raises(TypeError, match=msg):
- (+df)
- with pytest.raises(TypeError, match=msg):
- (+df["a"])
-
-
class TestDataFrameLogicalOperators:
# &, |, ^
diff --git a/pandas/tests/frame/test_unary.py b/pandas/tests/frame/test_unary.py
new file mode 100644
index 0000000000000..ea6243e2eae4a
--- /dev/null
+++ b/pandas/tests/frame/test_unary.py
@@ -0,0 +1,123 @@
+from decimal import Decimal
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+class TestDataFrameUnaryOperators:
+ # __pos__, __neg__, __inv__
+
+ @pytest.mark.parametrize(
+ "df,expected",
+ [
+ (pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})),
+ (pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})),
+ (
+ pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
+ pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}),
+ ),
+ ],
+ )
+ def test_neg_numeric(self, df, expected):
+ tm.assert_frame_equal(-df, expected)
+ tm.assert_series_equal(-df["a"], expected["a"])
+
+ @pytest.mark.parametrize(
+ "df, expected",
+ [
+ (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
+ ([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),
+ ],
+ )
+ def test_neg_object(self, df, expected):
+ # GH#21380
+ df = pd.DataFrame({"a": df})
+ expected = pd.DataFrame({"a": expected})
+ tm.assert_frame_equal(-df, expected)
+ tm.assert_series_equal(-df["a"], expected["a"])
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ pd.DataFrame({"a": ["a", "b"]}),
+ pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}),
+ ],
+ )
+ def test_neg_raises(self, df):
+ msg = (
+ "bad operand type for unary -: 'str'|"
+ r"Unary negative expects numeric dtype, not datetime64\[ns\]"
+ )
+ with pytest.raises(TypeError, match=msg):
+ (-df)
+ with pytest.raises(TypeError, match=msg):
+ (-df["a"])
+
+ def test_invert(self, float_frame):
+ df = float_frame
+
+ tm.assert_frame_equal(-(df < 0), ~(df < 0))
+
+ def test_invert_mixed(self):
+ shape = (10, 5)
+ df = pd.concat(
+ [
+ pd.DataFrame(np.zeros(shape, dtype="bool")),
+ pd.DataFrame(np.zeros(shape, dtype=int)),
+ ],
+ axis=1,
+ ignore_index=True,
+ )
+ result = ~df
+ expected = pd.concat(
+ [
+ pd.DataFrame(np.ones(shape, dtype="bool")),
+ pd.DataFrame(-np.ones(shape, dtype=int)),
+ ],
+ axis=1,
+ ignore_index=True,
+ )
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ pd.DataFrame({"a": [-1, 1]}),
+ pd.DataFrame({"a": [False, True]}),
+ pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
+ ],
+ )
+ def test_pos_numeric(self, df):
+ # GH#16073
+ tm.assert_frame_equal(+df, df)
+ tm.assert_series_equal(+df["a"], df["a"])
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ # numpy changing behavior in the future
+ pytest.param(
+ pd.DataFrame({"a": ["a", "b"]}),
+ marks=[pytest.mark.filterwarnings("ignore")],
+ ),
+ pd.DataFrame({"a": np.array([-1, 2], dtype=object)}),
+ pd.DataFrame({"a": [Decimal("-1.0"), Decimal("2.0")]}),
+ ],
+ )
+ def test_pos_object(self, df):
+ # GH#21380
+ tm.assert_frame_equal(+df, df)
+ tm.assert_series_equal(+df["a"], df["a"])
+
+ @pytest.mark.parametrize(
+ "df", [pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})]
+ )
+ def test_pos_raises(self, df):
+ msg = "Unary plus expects .* dtype, not datetime64\\[ns\\]"
+ with pytest.raises(TypeError, match=msg):
+ (+df)
+ with pytest.raises(TypeError, match=msg):
+ (+df["a"])
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 7629a472d6fca..217e5ae9ee32e 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -473,56 +473,3 @@ def test_logical_ops_df_compat(self):
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp_or1.to_frame())
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp_or.to_frame())
-
-
-class TestSeriesUnaryOps:
- # __neg__, __pos__, __inv__
-
- def test_neg(self):
- ser = tm.makeStringSeries()
- ser.name = "series"
- tm.assert_series_equal(-ser, -1 * ser)
-
- def test_invert(self):
- ser = tm.makeStringSeries()
- ser.name = "series"
- tm.assert_series_equal(-(ser < 0), ~(ser < 0))
-
- @pytest.mark.parametrize(
- "source, target",
- [
- ([1, 2, 3], [-1, -2, -3]),
- ([1, 2, None], [-1, -2, None]),
- ([-1, 0, 1], [1, 0, -1]),
- ],
- )
- def test_unary_minus_nullable_int(
- self, any_signed_nullable_int_dtype, source, target
- ):
- dtype = any_signed_nullable_int_dtype
- s = pd.Series(source, dtype=dtype)
- result = -s
- expected = pd.Series(target, dtype=dtype)
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]])
- def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source):
- dtype = any_signed_nullable_int_dtype
- expected = pd.Series(source, dtype=dtype)
- result = +expected
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize(
- "source, target",
- [
- ([1, 2, 3], [1, 2, 3]),
- ([1, -2, None], [1, 2, None]),
- ([-1, 0, 1], [1, 0, 1]),
- ],
- )
- def test_abs_nullable_int(self, any_signed_nullable_int_dtype, source, target):
- dtype = any_signed_nullable_int_dtype
- s = pd.Series(source, dtype=dtype)
- result = abs(s)
- expected = pd.Series(target, dtype=dtype)
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_unary.py b/pandas/tests/series/test_unary.py
new file mode 100644
index 0000000000000..40d5e56203c6c
--- /dev/null
+++ b/pandas/tests/series/test_unary.py
@@ -0,0 +1,57 @@
+import pytest
+
+from pandas import Series
+import pandas._testing as tm
+
+
+class TestSeriesUnaryOps:
+ # __neg__, __pos__, __inv__
+
+ def test_neg(self):
+ ser = tm.makeStringSeries()
+ ser.name = "series"
+ tm.assert_series_equal(-ser, -1 * ser)
+
+ def test_invert(self):
+ ser = tm.makeStringSeries()
+ ser.name = "series"
+ tm.assert_series_equal(-(ser < 0), ~(ser < 0))
+
+ @pytest.mark.parametrize(
+ "source, target",
+ [
+ ([1, 2, 3], [-1, -2, -3]),
+ ([1, 2, None], [-1, -2, None]),
+ ([-1, 0, 1], [1, 0, -1]),
+ ],
+ )
+ def test_unary_minus_nullable_int(
+ self, any_signed_nullable_int_dtype, source, target
+ ):
+ dtype = any_signed_nullable_int_dtype
+ ser = Series(source, dtype=dtype)
+ result = -ser
+ expected = Series(target, dtype=dtype)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]])
+ def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source):
+ dtype = any_signed_nullable_int_dtype
+ expected = Series(source, dtype=dtype)
+ result = +expected
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "source, target",
+ [
+ ([1, 2, 3], [1, 2, 3]),
+ ([1, -2, None], [1, 2, None]),
+ ([-1, 0, 1], [1, 0, 1]),
+ ],
+ )
+ def test_abs_nullable_int(self, any_signed_nullable_int_dtype, source, target):
+ dtype = any_signed_nullable_int_dtype
+ ser = Series(source, dtype=dtype)
+ result = abs(ser)
+ expected = Series(target, dtype=dtype)
+ tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37230 | 2020-10-18T22:29:49Z | 2020-10-19T12:57:55Z | 2020-10-19T12:57:55Z | 2020-10-19T16:31:48Z |
CLN: consolidate exception messages in datetimelike validate_listlike | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index cc2c753857032..de0a246861961 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -444,7 +444,7 @@ def _validate_comparison_value(self, other, opname: str):
else:
try:
- other = self._validate_listlike(other, opname, allow_object=True)
+ other = self._validate_listlike(other, allow_object=True)
self._check_compatible_with(other)
except TypeError as err:
if is_object_dtype(getattr(other, "dtype", None)):
@@ -548,7 +548,7 @@ def _validate_scalar(self, value, msg: Optional[str] = None):
return value
- def _validate_listlike(self, value, opname: str, allow_object: bool = False):
+ def _validate_listlike(self, value, allow_object: bool = False):
if isinstance(value, type(self)):
return value
@@ -578,10 +578,9 @@ def _validate_listlike(self, value, opname: str, allow_object: bool = False):
elif not type(self)._is_recognized_dtype(value.dtype):
raise TypeError(
- f"{opname} requires compatible dtype or scalar, "
- f"not {type(value).__name__}"
+ f"value should be a '{self._scalar_type.__name__}', 'NaT', "
+ f"or array of those. Got '{type(value).__name__}' instead."
)
-
return value
def _validate_searchsorted_value(self, value):
@@ -589,7 +588,7 @@ def _validate_searchsorted_value(self, value):
if not is_list_like(value):
value = self._validate_scalar(value, msg)
else:
- value = self._validate_listlike(value, "searchsorted")
+ value = self._validate_listlike(value)
rv = self._unbox(value)
return self._rebox_native(rv)
@@ -600,7 +599,7 @@ def _validate_setitem_value(self, value):
f"or array of those. Got '{type(value).__name__}' instead."
)
if is_list_like(value):
- value = self._validate_listlike(value, "setitem")
+ value = self._validate_listlike(value)
else:
value = self._validate_scalar(value, msg)
@@ -622,7 +621,7 @@ def _validate_where_value(self, other):
if not is_list_like(other):
other = self._validate_scalar(other, msg)
else:
- other = self._validate_listlike(other, "where")
+ other = self._validate_listlike(other)
return self._unbox(other, setitem=True)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index d6d8cb267e06b..b6836a0bbe496 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -663,9 +663,7 @@ def _wrap_joined_index(self, joined: np.ndarray, other):
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
try:
- return self._data._validate_listlike(
- keyarr, "convert_arr_indexer", allow_object=True
- )
+ return self._data._validate_listlike(keyarr, allow_object=True)
except (ValueError, TypeError):
return com.asarray_tuplesafe(keyarr)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index a961cf14b2e5c..ed7c7c31c6b8d 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -416,7 +416,10 @@ def test_setitem_raises(self):
def test_setitem_numeric_raises(self, arr1d, box):
# We dont case e.g. int64 to our own dtype for setitem
- msg = "requires compatible dtype"
+ msg = (
+ f"value should be a '{arr1d._scalar_type.__name__}', "
+ "'NaT', or array of those. Got"
+ )
with pytest.raises(TypeError, match=msg):
arr1d[:2] = box([0, 1])
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 9f136b4979bb7..78721fc2fe1c1 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -396,7 +396,7 @@ def test_searchsorted_invalid_types(self, other, index):
msg = "|".join(
[
"searchsorted requires compatible dtype or scalar",
- "Unexpected type for 'value'",
+ "value should be a 'Timestamp', 'NaT', or array of those. Got",
]
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index b3b8f4d55e4de..75d6f7d276518 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -136,7 +136,7 @@ def test_searchsorted_invalid_types(self, other, index):
msg = "|".join(
[
"searchsorted requires compatible dtype or scalar",
- "Unexpected type for 'value'",
+ "value should be a 'Timedelta', 'NaT', or array of those. Got",
]
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/indexes/period/test_searchsorted.py b/pandas/tests/indexes/period/test_searchsorted.py
index f2950b9f6065c..6ffdbbfcd2ce6 100644
--- a/pandas/tests/indexes/period/test_searchsorted.py
+++ b/pandas/tests/indexes/period/test_searchsorted.py
@@ -62,7 +62,7 @@ def test_searchsorted_invalid(self):
msg = "|".join(
[
"searchsorted requires compatible dtype or scalar",
- "Unexpected type for 'value'",
+ "value should be a 'Period', 'NaT', or array of those. Got",
]
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/indexes/timedeltas/test_searchsorted.py b/pandas/tests/indexes/timedeltas/test_searchsorted.py
index 3cf45931cf6b7..e3b52058469f0 100644
--- a/pandas/tests/indexes/timedeltas/test_searchsorted.py
+++ b/pandas/tests/indexes/timedeltas/test_searchsorted.py
@@ -21,6 +21,6 @@ def test_searchsorted_different_argument_classes(self, klass):
)
def test_searchsorted_invalid_argument_dtype(self, arg):
idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
- msg = "searchsorted requires compatible dtype"
+ msg = "value should be a 'Timedelta', 'NaT', or array of those. Got"
with pytest.raises(TypeError, match=msg):
idx.searchsorted(arg)
| The next step is to consolidate the messages in _validate_scalar, then we should be able to get rid of some duplicate _validate_foo methods | https://api.github.com/repos/pandas-dev/pandas/pulls/37229 | 2020-10-18T22:19:27Z | 2020-10-20T16:57:10Z | 2020-10-20T16:57:10Z | 2020-10-20T17:31:51Z |
TST: indexing tests for #21168, #27420, #15928, #30053 | diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 6b27682ed5674..cf494b2ce87cc 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas.errors import InvalidIndexError
+from pandas.errors import InvalidIndexError, PerformanceWarning
import pandas as pd
from pandas import Categorical, Index, MultiIndex, date_range
@@ -646,6 +646,22 @@ def test_get_loc_duplicates2(self):
assert index.get_loc("D") == slice(0, 3)
+ def test_get_loc_past_lexsort_depth(self):
+ # GH#30053
+ idx = MultiIndex(
+ levels=[["a"], [0, 7], [1]],
+ codes=[[0, 0], [1, 0], [0, 0]],
+ names=["x", "y", "z"],
+ sortorder=0,
+ )
+ key = ("a", 7)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ # PerformanceWarning: indexing past lexsort depth may impact performance
+ result = idx.get_loc(key)
+
+ assert result == slice(0, 1, None)
+
class TestWhere:
def test_where(self):
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 1b659bec0e9e8..518ec9e997183 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -522,3 +522,47 @@ def test_loc_with_mi_indexer():
columns=["author", "price"],
)
tm.assert_frame_equal(result, expected)
+
+
+def test_getitem_str_slice(datapath):
+ # GH#15928
+ path = datapath("reshape", "merge", "data", "quotes2.csv")
+ df = pd.read_csv(path, parse_dates=["time"])
+ df2 = df.set_index(["ticker", "time"]).sort_index()
+
+ res = df2.loc[("AAPL", slice("2016-05-25 13:30:00")), :].droplevel(0)
+ expected = df2.loc["AAPL"].loc[slice("2016-05-25 13:30:00"), :]
+ tm.assert_frame_equal(res, expected)
+
+
+def test_3levels_leading_period_index():
+ # GH#24091
+ pi = pd.PeriodIndex(
+ ["20181101 1100", "20181101 1200", "20181102 1300", "20181102 1400"],
+ name="datetime",
+ freq="B",
+ )
+ lev2 = ["A", "A", "Z", "W"]
+ lev3 = ["B", "C", "Q", "F"]
+ mi = pd.MultiIndex.from_arrays([pi, lev2, lev3])
+
+ ser = pd.Series(range(4), index=mi, dtype=np.float64)
+ result = ser.loc[(pi[0], "A", "B")]
+ assert result == 0.0
+
+
+class TestKeyErrorsWithMultiIndex:
+ def test_missing_keys_raises_keyerror(self):
+ # GH#27420 KeyError, not TypeError
+ df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=["A", "B", "C"])
+ df2 = df.set_index(["A", "B"])
+
+ with pytest.raises(KeyError, match="1"):
+ df2.loc[(1, 6)]
+
+ def test_missing_key_raises_keyerror2(self):
+ # GH#21168 KeyError, not "IndexingError: Too many indexers"
+ ser = pd.Series(-1, index=pd.MultiIndex.from_product([[0, 1]] * 2))
+
+ with pytest.raises(KeyError, match=r"\(0, 3\)"):
+ ser.loc[0, 3]
| - [x] closes #21168
- [x] closes #27420
- [x] closes #15928
- [x] closes #30053
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37228 | 2020-10-18T22:14:44Z | 2020-10-20T02:08:30Z | 2020-10-20T02:08:29Z | 2020-10-20T02:08:43Z |
CLN: Simplify gathering of results in aggregate | diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index ba7638e269fc0..4ab0c2515f5fa 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -31,7 +31,7 @@
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import is_dict_like, is_list_like
-from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+from pandas.core.dtypes.generic import ABCDataFrame, ABCNDFrame, ABCSeries
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
@@ -621,58 +621,27 @@ def aggregate(obj, arg: AggFuncType, *args, **kwargs):
# set the final keys
keys = list(arg.keys())
- # combine results
-
- def is_any_series() -> bool:
- # return a boolean if we have *any* nested series
- return any(isinstance(r, ABCSeries) for r in results.values())
-
- def is_any_frame() -> bool:
- # return a boolean if we have *any* nested series
- return any(isinstance(r, ABCDataFrame) for r in results.values())
-
- if isinstance(results, list):
- return concat(results, keys=keys, axis=1, sort=True), True
-
- elif is_any_frame():
- # we have a dict of DataFrames
- # return a MI DataFrame
+ # Avoid making two isinstance calls in all and any below
+ is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
+ # combine results
+ if all(is_ndframe):
keys_to_use = [k for k in keys if not results[k].empty]
# Have to check, if at least one DataFrame is not empty.
keys_to_use = keys_to_use if keys_to_use != [] else keys
- return (
- concat([results[k] for k in keys_to_use], keys=keys_to_use, axis=1),
- True,
+ axis = 0 if isinstance(obj, ABCSeries) else 1
+ result = concat({k: results[k] for k in keys_to_use}, axis=axis)
+ elif any(is_ndframe):
+ # There is a mix of NDFrames and scalars
+ raise ValueError(
+ "cannot perform both aggregation "
+ "and transformation operations "
+ "simultaneously"
)
+ else:
+ from pandas import Series
- elif isinstance(obj, ABCSeries) and is_any_series():
-
- # we have a dict of Series
- # return a MI Series
- try:
- result = concat(results)
- except TypeError as err:
- # we want to give a nice error here if
- # we have non-same sized objects, so
- # we don't automatically broadcast
-
- raise ValueError(
- "cannot perform both aggregation "
- "and transformation operations "
- "simultaneously"
- ) from err
-
- return result, True
-
- # fall thru
- from pandas import DataFrame, Series
-
- try:
- result = DataFrame(results)
- except ValueError:
# we have a dict of scalars
-
# GH 36212 use name only if obj is a series
if obj.ndim == 1:
obj = cast("Series", obj)
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 1f1017cfc1929..7d2549713c6bc 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -53,6 +53,7 @@ def _check(cls, inst) -> bool:
},
)
+ABCNDFrame = create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe"))
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 801307a8f9481..84b66d0c9519e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7426,9 +7426,9 @@ def _gotitem(
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
- max NaN 8.0
- min 1.0 2.0
sum 12.0 NaN
+ min 1.0 2.0
+ max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index 598da9c52731e..f3cb630d1be0c 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -1254,7 +1254,7 @@ def test_agg_reduce(self, axis, float_frame):
# dict input with lists with multiple
func = dict([(name1, ["mean", "sum"]), (name2, ["sum", "max"])])
result = float_frame.agg(func, axis=axis)
- expected = DataFrame(
+ expected = pd.concat(
dict(
[
(
@@ -1278,7 +1278,8 @@ def test_agg_reduce(self, axis, float_frame):
),
),
]
- )
+ ),
+ axis=1,
)
expected = expected.T if axis in {1, "columns"} else expected
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Reduces the number of paths when collecting results in `aggregation.aggregate`; one where we are gathering NDFrames using `concat` and the other where we are gathering scalars using `Series`.
The reason for the one test change is as follows. In one case, we previously gathered results using `DataFrame`. When this occurs and the indexes are not all equal, `DataFrame` will sort the index whereas `concat` will have the index in order of appearance. For example with
```
df = DataFrame(
{
'A': pd.Series([1, 2], index=['b', 'a']),
'B': pd.Series([3, 4], index=['c', 'a'])
}
)
```
gives
```
A B
a 2.0 4.0
b 1.0 NaN
c NaN 3.0
```
whereas using `concat` instead of `DataFrame` on the first line with `axis=1` gives:
```
A B
b 1.0 NaN
a 2.0 4.0
c NaN 3.0
```
If in this example you replace the 2nd index with ['b', 'a'] (so that they are equal), then both `Dataframe` and `concat` will produce the same result with index `['b', 'a']`. If on the other hand you replace the 2nd index with `['a', 'b']`, then `DataFrame` will result in index `['a', 'b']` whereas concat will result in index `['b', 'a']`. | https://api.github.com/repos/pandas-dev/pandas/pulls/37227 | 2020-10-18T21:14:48Z | 2020-10-22T23:49:42Z | 2020-10-22T23:49:42Z | 2020-10-22T23:54:28Z |
DOC: Fix typos and broken formatting | diff --git a/doc/source/getting_started/intro_tutorials/10_text_data.rst b/doc/source/getting_started/intro_tutorials/10_text_data.rst
index b7fb99a98d78f..b8db7de5b7b10 100644
--- a/doc/source/getting_started/intro_tutorials/10_text_data.rst
+++ b/doc/source/getting_started/intro_tutorials/10_text_data.rst
@@ -66,15 +66,15 @@ How to manipulate textual data?
<ul class="task-bullet">
<li>
-Make all name characters lowercase
+Make all name characters lowercase.
.. ipython:: python
titanic["Name"].str.lower()
To make each of the strings in the ``Name`` column lowercase, select the ``Name`` column
-(see :ref:`tutorial on selection of data <10min_tut_03_subset>`), add the ``str`` accessor and
-apply the ``lower`` method. As such, each of the strings is converted element wise.
+(see the :ref:`tutorial on selection of data <10min_tut_03_subset>`), add the ``str`` accessor and
+apply the ``lower`` method. As such, each of the strings is converted element-wise.
.. raw:: html
@@ -86,7 +86,7 @@ having a ``dt`` accessor, a number of
specialized string methods are available when using the ``str``
accessor. These methods have in general matching names with the
equivalent built-in string methods for single elements, but are applied
-element-wise (remember :ref:`element wise calculations <10min_tut_05_columns>`?)
+element-wise (remember :ref:`element-wise calculations <10min_tut_05_columns>`?)
on each of the values of the columns.
.. raw:: html
@@ -94,7 +94,7 @@ on each of the values of the columns.
<ul class="task-bullet">
<li>
-Create a new column ``Surname`` that contains the surname of the Passengers by extracting the part before the comma.
+Create a new column ``Surname`` that contains the surname of the passengers by extracting the part before the comma.
.. ipython:: python
@@ -135,7 +135,7 @@ More information on extracting parts of strings is available in the user guide s
<ul class="task-bullet">
<li>
-Extract the passenger data about the Countesses on board of the Titanic.
+Extract the passenger data about the countesses on board of the Titanic.
.. ipython:: python
@@ -145,15 +145,15 @@ Extract the passenger data about the Countesses on board of the Titanic.
titanic[titanic["Name"].str.contains("Countess")]
-(*Interested in her story? See *\ `Wikipedia <https://en.wikipedia.org/wiki/No%C3%ABl_Leslie,_Countess_of_Rothes>`__\ *!*)
+(*Interested in her story? See* `Wikipedia <https://en.wikipedia.org/wiki/No%C3%ABl_Leslie,_Countess_of_Rothes>`__\ *!*)
The string method :meth:`Series.str.contains` checks for each of the values in the
column ``Name`` if the string contains the word ``Countess`` and returns
-for each of the values ``True`` (``Countess`` is part of the name) of
+for each of the values ``True`` (``Countess`` is part of the name) or
``False`` (``Countess`` is not part of the name). This output can be used
to subselect the data using conditional (boolean) indexing introduced in
the :ref:`subsetting of data tutorial <10min_tut_03_subset>`. As there was
-only one Countess on the Titanic, we get one row as a result.
+only one countess on the Titanic, we get one row as a result.
.. raw:: html
@@ -220,7 +220,7 @@ we can do a selection using the ``loc`` operator, introduced in the
<ul class="task-bullet">
<li>
-In the "Sex" column, replace values of "male" by "M" and values of "female" by "F"
+In the "Sex" column, replace values of "male" by "M" and values of "female" by "F".
.. ipython:: python
@@ -256,7 +256,7 @@ a ``dictionary`` to define the mapping ``{from : to}``.
<h4>REMEMBER</h4>
- String methods are available using the ``str`` accessor.
-- String methods work element wise and can be used for conditional
+- String methods work element-wise and can be used for conditional
indexing.
- The ``replace`` method is a convenient method to convert values
according to a given dictionary.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This PR fixes minor typos in the getting started tutorial [How to manipulate textual data?](https://pandas.pydata.org/pandas-docs/stable/getting_started/intro_tutorials/10_text_data.html). I built the documentation to confirm that I fixed the formatting of the Wikipedia link. I also fixed the inconsistent use of "element-wise" versus "element wise". | https://api.github.com/repos/pandas-dev/pandas/pulls/37226 | 2020-10-18T19:45:07Z | 2020-10-22T19:37:39Z | 2020-10-22T19:37:39Z | 2020-10-22T21:02:51Z |
CLN: remove unused arguments in _get_string_slice | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 3713eb6da60ac..f336eec8c4cce 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4994,7 +4994,7 @@ def isin(self, values, level=None):
self._validate_index_level(level)
return algos.isin(self, values)
- def _get_string_slice(self, key: str_t, use_lhs: bool = True, use_rhs: bool = True):
+ def _get_string_slice(self, key: str_t):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 017dc6527944a..d6d8cb267e06b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -398,16 +398,12 @@ def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
- use_lhs: bool = True,
- use_rhs: bool = True,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
- use_lhs : bool, default True
- use_rhs : bool, default True
Returns
-------
@@ -422,8 +418,7 @@ def _partial_date_slice(
if self.is_monotonic:
if len(self) and (
- (use_lhs and t1 < self[0] and t2 < self[0])
- or (use_rhs and t1 > self[-1] and t2 > self[-1])
+ (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
@@ -432,13 +427,13 @@ def _partial_date_slice(
# a monotonic (sorted) series can be sliced
# Use asi8.searchsorted to avoid re-validating Periods/Timestamps
- left = i8vals.searchsorted(unbox(t1), side="left") if use_lhs else None
- right = i8vals.searchsorted(unbox(t2), side="right") if use_rhs else None
+ left = i8vals.searchsorted(unbox(t1), side="left")
+ right = i8vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
- lhs_mask = (i8vals >= unbox(t1)) if use_lhs else True
- rhs_mask = (i8vals <= unbox(t2)) if use_rhs else True
+ lhs_mask = i8vals >= unbox(t1)
+ rhs_mask = i8vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 479e2023a00cb..11417e0b3317e 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -729,11 +729,11 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
self._deprecate_mismatched_indexing(label)
return self._maybe_cast_for_get_loc(label)
- def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
+ def _get_string_slice(self, key: str):
freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
parsed, reso = parsing.parse_time_string(key, freq)
reso = Resolution.from_attrname(reso)
- loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
+ loc = self._partial_date_slice(reso, parsed)
return loc
def slice_indexer(self, start=None, end=None, step=None, kind=None):
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index ce2839ab9a8e1..4f92bb7bd7a87 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -622,12 +622,11 @@ def _validate_partial_date_slice(self, reso: Resolution):
# why is that check not needed?
raise ValueError
- def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
- # TODO: Check for non-True use_lhs/use_rhs
+ def _get_string_slice(self, key: str):
parsed, reso = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso)
try:
- return self._partial_date_slice(reso, parsed, use_lhs, use_rhs)
+ return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37225 | 2020-10-18T18:45:20Z | 2020-10-19T22:40:07Z | 2020-10-19T22:40:07Z | 2020-10-19T22:45:41Z |
TYP: indexes | diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 3ffb1160c14ce..47a554b9101c1 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1289,10 +1289,8 @@ def interval_range(
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
- range_func = date_range
+ breaks = date_range(start=start, end=end, periods=periods, freq=freq)
else:
- range_func = timedelta_range
-
- breaks = range_func(start=start, end=end, periods=periods, freq=freq)
+ breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 41968c5972ea5..590b711d6d45a 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -148,7 +148,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
- # methods that dispatch to array and wrap result in PeriodIndex
+ # methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(PeriodArray.asfreq)
@@ -161,6 +161,24 @@ def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
+ # error: Decorated property not supported [misc]
+ @property # type:ignore[misc]
+ @doc(PeriodArray.hour.fget)
+ def hour(self) -> Int64Index:
+ return Int64Index(self._data.hour, name=self.name)
+
+ # error: Decorated property not supported [misc]
+ @property # type:ignore[misc]
+ @doc(PeriodArray.minute.fget)
+ def minute(self) -> Int64Index:
+ return Int64Index(self._data.minute, name=self.name)
+
+ # error: Decorated property not supported [misc]
+ @property # type:ignore[misc]
+ @doc(PeriodArray.second.fget)
+ def second(self) -> Int64Index:
+ return Int64Index(self._data.second, name=self.name)
+
# ------------------------------------------------------------------------
# Index Constructors
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 13052c23a9f11..77b1076920f20 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -3,7 +3,7 @@
"""
from collections import abc
-from typing import TYPE_CHECKING, Iterable, List, Mapping, Union, overload
+from typing import TYPE_CHECKING, Iterable, List, Mapping, Type, Union, cast, overload
import numpy as np
@@ -30,7 +30,7 @@
from pandas.core.internals import concatenate_block_managers
if TYPE_CHECKING:
- from pandas import DataFrame
+ from pandas import DataFrame, Series
from pandas.core.generic import NDFrame
# ---------------------------------------------------------------------
@@ -455,14 +455,17 @@ def __init__(
self.new_axes = self._get_new_axes()
def get_result(self):
+ cons: Type[FrameOrSeriesUnion]
+ sample: FrameOrSeriesUnion
# series only
if self._is_series:
+ sample = cast("Series", self.objs[0])
# stack blocks
if self.bm_axis == 0:
name = com.consensus_name_attr(self.objs)
- cons = self.objs[0]._constructor
+ cons = sample._constructor
arrs = [ser._values for ser in self.objs]
@@ -475,7 +478,7 @@ def get_result(self):
data = dict(zip(range(len(self.objs)), self.objs))
# GH28330 Preserves subclassed objects through concat
- cons = self.objs[0]._constructor_expanddim
+ cons = sample._constructor_expanddim
index, columns = self.new_axes
df = cons(data, index=index)
@@ -484,6 +487,8 @@ def get_result(self):
# combine block managers
else:
+ sample = cast("DataFrame", self.objs[0])
+
mgrs_indexers = []
for obj in self.objs:
indexers = {}
@@ -506,7 +511,7 @@ def get_result(self):
if not self.copy:
new_data._consolidate_inplace()
- cons = self.objs[0]._constructor
+ cons = sample._constructor
return cons(new_data).__finalize__(self, method="concat")
def _get_result_dim(self) -> int:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 20c0297448494..2110a2d400be8 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2177,7 +2177,7 @@ def TextParser(*args, **kwds):
return TextFileReader(*args, **kwds)
-def count_empty_vals(vals):
+def count_empty_vals(vals) -> int:
return sum(1 for v in vals if v == "" or v is None)
diff --git a/setup.cfg b/setup.cfg
index 8c86a723bbb59..d8525c12cf13e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -181,18 +181,12 @@ check_untyped_defs=False
[mypy-pandas.core.indexes.extension]
check_untyped_defs=False
-[mypy-pandas.core.indexes.interval]
-check_untyped_defs=False
-
[mypy-pandas.core.indexes.multi]
check_untyped_defs=False
[mypy-pandas.core.resample]
check_untyped_defs=False
-[mypy-pandas.core.reshape.concat]
-check_untyped_defs=False
-
[mypy-pandas.core.reshape.merge]
check_untyped_defs=False
@@ -214,9 +208,6 @@ check_untyped_defs=False
[mypy-pandas.io.parsers]
check_untyped_defs=False
-[mypy-pandas.plotting._matplotlib.converter]
-check_untyped_defs=False
-
[mypy-pandas.plotting._matplotlib.core]
check_untyped_defs=False
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37224 | 2020-10-18T18:15:48Z | 2020-10-29T15:51:24Z | 2020-10-29T15:51:24Z | 2020-10-29T15:55:27Z |
Backport PR #37149 on branch 1.1.x (BUG: GroupBy().fillna() performance regression) | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 5ffda03fad80f..6714afe59143e 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -358,6 +358,26 @@ def time_category_size(self):
self.draws.groupby(self.cats).size()
+class FillNA:
+ def setup(self):
+ N = 100
+ self.df = DataFrame(
+ {"group": [1] * N + [2] * N, "value": [np.nan, 1.0] * N}
+ ).set_index("group")
+
+ def time_df_ffill(self):
+ self.df.groupby("group").fillna(method="ffill")
+
+ def time_df_bfill(self):
+ self.df.groupby("group").fillna(method="bfill")
+
+ def time_srs_ffill(self):
+ self.df.groupby("group")["value"].fillna(method="ffill")
+
+ def time_srs_bfill(self):
+ self.df.groupby("group")["value"].fillna(method="bfill")
+
+
class GroupByMethods:
param_names = ["dtype", "method", "application"]
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index d5b6abd9f9de4..ad59711b90f6e 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -29,6 +29,7 @@ Bug fixes
~~~~~~~~~
- Bug causing ``groupby(...).sum()`` and similar to not preserve metadata (:issue:`29442`)
- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` raising a ``ValueError`` when the target was read-only (:issue:`37174`)
+- Bug in :meth:`GroupBy.fillna` that introduced a performance regression after 1.0.5 (:issue:`36757`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index f80e2b3758ae1..b3ec9cf71786a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1127,12 +1127,12 @@ def reset_identity(values):
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
- if ax.has_duplicates:
+ if ax.has_duplicates and not result.axes[self.axis].equals(ax):
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
- result = result.reindex(ax, axis=self.axis)
+ result = result.reindex(ax, axis=self.axis, copy=False)
elif self.group_keys:
| Backport PR #37149: BUG: GroupBy().fillna() performance regression | https://api.github.com/repos/pandas-dev/pandas/pulls/37223 | 2020-10-18T17:01:58Z | 2020-10-19T11:03:09Z | 2020-10-19T11:03:09Z | 2020-10-19T11:03:09Z |
Fix regression for is_monotonic_increasing with nan in MultiIndex | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index eb68ca38ea5b6..c4b3ab270cf8f 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :class:`PeriodDtype` comparing both equal and unequal to its string representation (:issue:`37265`)
- Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`)
- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`)
+- Fixed regression in :attr:`MultiIndex.is_monotonic_increasing` returning wrong results with ``NaN`` in at least one of the levels (:issue:`37220`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 380df22861218..beaf16c39683b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1539,7 +1539,10 @@ def is_monotonic_increasing(self) -> bool:
return if the index is monotonic increasing (only equal or
increasing) values.
"""
- if all(x.is_monotonic for x in self.levels):
+ if any(-1 in code for code in self.codes):
+ return False
+
+ if all(level.is_monotonic for level in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
diff --git a/pandas/tests/indexes/multi/test_monotonic.py b/pandas/tests/indexes/multi/test_monotonic.py
index ca1cb0932f63d..8659573d8123a 100644
--- a/pandas/tests/indexes/multi/test_monotonic.py
+++ b/pandas/tests/indexes/multi/test_monotonic.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas as pd
from pandas import Index, MultiIndex
@@ -174,3 +175,14 @@ def test_is_strictly_monotonic_decreasing():
)
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
+
+
+@pytest.mark.parametrize("attr", ["is_monotonic_increasing", "is_monotonic_decreasing"])
+@pytest.mark.parametrize(
+ "values",
+ [[(np.nan,), (1,), (2,)], [(1,), (np.nan,), (2,)], [(1,), (2,), (np.nan,)]],
+)
+def test_is_monotonic_with_nans(values, attr):
+ # GH: 37220
+ idx = pd.MultiIndex.from_tuples(values, names=["test"])
+ assert getattr(idx, attr) is False
| - [x] closes #37220
- [x] closes #27498 (closes that one too probably, because we need this case)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Found this while debugging a different issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/37221 | 2020-10-18T14:03:54Z | 2020-10-27T12:51:14Z | 2020-10-27T12:51:13Z | 2020-10-27T13:12:34Z |
DOC: Updated resample.py and groupby.py to fix SA04 Errors | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index c6b47d09cf0bd..e21526a8f69e4 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1056,7 +1056,8 @@ cdef class _Timedelta(timedelta):
See Also
--------
- Timestamp.isoformat
+ Timestamp.isoformat : Function is used to convert the given
+ Timestamp object into the ISO format.
Notes
-----
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index f8ff5ac18bbd9..57f8f11d4d04c 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -232,8 +232,8 @@ def _from_factorized(cls, values, original):
See Also
--------
- factorize
- ExtensionArray.factorize
+ factorize : Top-level factorize method that dispatches here.
+ ExtensionArray.factorize : Encode the extension array as an enumerated type.
"""
raise AbstractMethodError(cls)
@@ -501,7 +501,7 @@ def _values_for_argsort(self) -> np.ndarray:
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort : Return the indices that would sort this array.
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
@@ -968,8 +968,8 @@ def take(
See Also
--------
- numpy.take
- api.extensions.take
+ numpy.take : Take elements from an array along an axis.
+ api.extensions.take : Take elements from an array.
Notes
-----
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 9ffe00cf3189a..b46e70f5a936d 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -418,7 +418,7 @@ def _values_for_argsort(self) -> np.ndarray:
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = -1
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 081a363ce03c6..1b6a428d7df0d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2061,7 +2061,7 @@ def unique(self):
--------
pandas.unique
CategoricalIndex.unique
- Series.unique
+ Series.unique : Return unique values of Series object.
Examples
--------
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index b1b8b513320e9..52a10c96d5c21 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1259,8 +1259,10 @@ def isocalendar(self):
See Also
--------
- Timestamp.isocalendar
- datetime.date.isocalendar
+ Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
+ week number, and weekday for the given Timestamp object.
+ datetime.date.isocalendar : Return a named tuple object with
+ three components: year, week and weekday.
Examples
--------
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 88a5a88efe146..e53276369a46f 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -484,7 +484,7 @@ def _values_for_argsort(self) -> np.ndarray:
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
if self._mask.any():
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 96de54380c7ad..8630867c64f88 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -21,8 +21,9 @@ class ExtensionDtype:
See Also
--------
- extensions.register_extension_dtype
- extensions.ExtensionArray
+ extensions.register_extension_dtype: Register an ExtensionType
+ with pandas as class decorator.
+ extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index eb150dd87347f..0ee6b22f1ca45 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -586,7 +586,7 @@ def shape(self) -> Tuple[int, int]:
See Also
--------
- ndarray.shape
+ ndarray.shape : Tuple of array dimensions.
Examples
--------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e000fe5fa733d..e7bb2df343998 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -260,7 +260,7 @@ def attrs(self) -> Dict[Optional[Hashable], Any]:
See Also
--------
- DataFrame.flags
+ DataFrame.flags : Global flags applying to this object.
"""
if self._attrs is None:
self._attrs = {}
@@ -281,8 +281,8 @@ def flags(self) -> Flags:
See Also
--------
- Flags
- DataFrame.attrs
+ Flags : Flags that apply to pandas objects.
+ DataFrame.attrs : Global metadata applying to this dataset.
Notes
-----
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ab01f99ba11f9..413463c5e181f 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -79,8 +79,9 @@ class providing the base-class of operations.
_common_see_also = """
See Also
--------
- Series.%(name)s
- DataFrame.%(name)s
+ Series.%(name)s : Apply a function %(name)s to a Series.
+ DataFrame.%(name)s : Apply a function %(name)s
+ to each row or column of a DataFrame.
"""
_apply_docs = dict(
@@ -318,9 +319,12 @@ class providing the base-class of operations.
See Also
--------
-%(klass)s.groupby.apply
-%(klass)s.groupby.aggregate
-%(klass)s.transform
+%(klass)s.groupby.apply : Apply function func group-wise
+ and combine the results together.
+%(klass)s.groupby.aggregate : Aggregate using one or more
+ operations over the specified axis.
+%(klass)s.transform : Transforms the Series on each group
+ based on the given function.
Notes
-----
@@ -427,9 +431,12 @@ class providing the base-class of operations.
See Also
--------
-{klass}.groupby.apply
-{klass}.groupby.transform
-{klass}.aggregate
+{klass}.groupby.apply : Apply function func group-wise
+ and combine the results together.
+{klass}.groupby.transform : Aggregate using one or more
+ operations over the specified axis.
+{klass}.aggregate : Transforms the Series on each group
+ based on the given function.
Notes
-----
@@ -1856,8 +1863,8 @@ def _fill(self, direction, limit=None):
See Also
--------
- pad
- backfill
+ pad : Returns Series with minimum number of char in object.
+ backfill : Backward fill the missing values in the dataset.
"""
# Need int value for Cython
if limit is None:
@@ -1891,10 +1898,10 @@ def pad(self, limit=None):
See Also
--------
- Series.pad
- DataFrame.pad
- Series.fillna
- DataFrame.fillna
+ Series.pad: Returns Series with minimum number of char in object.
+ DataFrame.pad: Object with missing values filled or None if inplace=True.
+ Series.fillna: Fill NaN values of a Series.
+ DataFrame.fillna: Fill NaN values of a DataFrame.
"""
return self._fill("ffill", limit=limit)
@@ -1917,10 +1924,10 @@ def backfill(self, limit=None):
See Also
--------
- Series.backfill
- DataFrame.backfill
- Series.fillna
- DataFrame.fillna
+ Series.backfill : Backward fill the missing values in the dataset.
+ DataFrame.backfill: Backward fill the missing values in the dataset.
+ Series.fillna: Fill NaN values of a Series.
+ DataFrame.fillna: Fill NaN values of a DataFrame.
"""
return self._fill("bfill", limit=limit)
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index b9b2c4b07d37a..3eb18d176dd92 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -236,8 +236,10 @@ def isocalendar(self):
See Also
--------
- Timestamp.isocalendar
- datetime.date.isocalendar
+ Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
+ week number, and weekday for the given Timestamp object.
+ datetime.date.isocalendar : Return a named tuple object with
+ three components: year, week and weekday.
Examples
--------
@@ -326,7 +328,8 @@ def to_pytimedelta(self) -> np.ndarray:
See Also
--------
- datetime.timedelta
+ datetime.timedelta : A duration expressing the difference
+ between two date, time, or datetime.
Examples
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 006469f79780d..c710039d8fa0e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -613,7 +613,7 @@ def ravel(self, order="C"):
See Also
--------
- numpy.ndarray.ravel
+ numpy.ndarray.ravel : Return a flattened array.
"""
warnings.warn(
"Index.ravel returning ndarray is deprecated; in a future version "
@@ -709,7 +709,8 @@ def astype(self, dtype, copy=True):
See Also
--------
- numpy.ndarray.take
+ numpy.ndarray.take: Return an array formed from the
+ elements of a at the given indices.
"""
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
@@ -2309,8 +2310,8 @@ def unique(self, level=None):
See Also
--------
- unique
- Series.unique
+ unique : Numpy array of unique values in that column.
+ Series.unique : Return unique values of Series object.
"""
if level is not None:
self._validate_index_level(level)
@@ -3953,7 +3954,7 @@ def _values(self) -> Union[ExtensionArray, np.ndarray]:
See Also
--------
- values
+ values : Values
"""
return self._data
@@ -4244,7 +4245,8 @@ def putmask(self, mask, value):
See Also
--------
- numpy.ndarray.putmask
+ numpy.ndarray.putmask : Changes elements of an array
+ based on conditional and input values.
"""
values = self.values.copy()
try:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index caef938272f6f..78d217c4688b6 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -131,7 +131,7 @@ def __iter__(self):
See Also
--------
- GroupBy.__iter__
+ GroupBy.__iter__ : Generator yielding sequence for each group.
"""
self._set_binner()
return super().__iter__()
@@ -235,9 +235,12 @@ def pipe(self, func, *args, **kwargs):
"""
See Also
--------
- DataFrame.groupby.aggregate
- DataFrame.resample.transform
- DataFrame.aggregate
+ DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
+ or list of string/callables.
+ DataFrame.resample.transform : Transforms the Series on each group
+ based on the given function.
+ DataFrame.aggregate: Aggregate using one or more
+ operations over the specified axis.
"""
)
@@ -454,8 +457,8 @@ def pad(self, limit=None):
See Also
--------
- Series.fillna
- DataFrame.fillna
+ Series.fillna: Fill NA/NaN values using the specified method.
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._upsample("pad", limit=limit)
@@ -829,8 +832,8 @@ def asfreq(self, fill_value=None):
See Also
--------
- Series.asfreq
- DataFrame.asfreq
+ Series.asfreq: Convert TimeSeries to specified frequency.
+ DataFrame.asfreq: Convert TimeSeries to specified frequency.
"""
return self._upsample("asfreq", fill_value=fill_value)
@@ -916,8 +919,13 @@ def quantile(self, q=0.5, **kwargs):
See Also
--------
Series.quantile
+ Return a series, where the index is q and the values are the quantiles.
DataFrame.quantile
+ Return a DataFrame, where the columns are the columns of self,
+ and the values are the quantiles.
DataFrameGroupBy.quantile
+ Return a DataFrame, where the coulmns are groupby columns,
+ and the values are its quantiles.
"""
return self._downsample("quantile", q=q, **kwargs)
@@ -1073,7 +1081,7 @@ def _upsample(self, method, limit=None, fill_value=None):
See Also
--------
- .fillna
+ .fillna: Fill NA/NaN values using the specified method.
"""
self._set_binner()
@@ -1209,7 +1217,7 @@ def _upsample(self, method, limit=None, fill_value=None):
See Also
--------
- .fillna
+ .fillna: Fill NA/NaN values using the specified method.
"""
# we may need to actually resample as if we are timestamps
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 0089d7a32f723..2f3416cbf2d87 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -829,7 +829,8 @@ def applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
See Also
--------
- Styler.where
+ Styler.where: Updates the HTML representation with a style which is
+ selected in accordance with the return value of a function.
"""
self._todo.append(
(lambda instance: getattr(instance, "_applymap"), (func, subset), kwargs)
@@ -870,7 +871,7 @@ def where(
See Also
--------
- Styler.applymap
+ Styler.applymap: Updates the HTML representation with the result.
"""
if other is None:
other = ""
@@ -930,7 +931,7 @@ def export(self) -> List[Tuple[Callable, Tuple, Dict]]:
See Also
--------
- Styler.use
+ Styler.use: Set the styles on the current Styler.
"""
return self._todo
@@ -951,7 +952,7 @@ def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> "Styler":
See Also
--------
- Styler.export
+ Styler.export : Export the styles to applied to the current Styler.
"""
self._todo.extend(styles)
return self
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry: Fixed Partial SA04 Errors
| https://api.github.com/repos/pandas-dev/pandas/pulls/37219 | 2020-10-18T13:09:21Z | 2020-10-29T11:14:22Z | 2020-10-29T11:14:22Z | 2020-10-29T16:07:25Z |
CLN: de-duplicate in arithmetic/test_numeric | diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 04ba41307d0ef..b5f14700088bb 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -172,16 +172,7 @@ def test_div_td64arr(self, left, box_cls):
with pytest.raises(TypeError, match=msg):
left // right
- # TODO: de-duplicate with test_numeric_arr_mul_tdscalar
- def test_ops_series(self):
- # regression test for G#H8813
- td = Timedelta("1 day")
- other = pd.Series([1, 2])
- expected = pd.Series(pd.to_timedelta(["1 day", "2 days"]))
- tm.assert_series_equal(expected, td * other)
- tm.assert_series_equal(expected, other * td)
-
- # TODO: also test non-nanosecond timedelta64 and Tick objects;
+ # TODO: also test Tick objects;
# see test_numeric_arr_rdiv_tdscalar for note on these failing
@pytest.mark.parametrize(
"scalar_td",
@@ -189,6 +180,8 @@ def test_ops_series(self):
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta(),
+ Timedelta(days=1).to_timedelta64().astype("timedelta64[s]"),
+ Timedelta(days=1).to_timedelta64().astype("timedelta64[ms]"),
],
ids=lambda x: type(x).__name__,
)
@@ -196,7 +189,7 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array):
# GH#19333
box = box_with_array
index = numeric_idx
- expected = pd.TimedeltaIndex([pd.Timedelta(days=n) for n in range(5)])
+ expected = pd.TimedeltaIndex([pd.Timedelta(days=n) for n in range(len(index))])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- Move one test to another one parametrized.
- Address one part of TODO item | https://api.github.com/repos/pandas-dev/pandas/pulls/37215 | 2020-10-18T05:13:57Z | 2020-10-20T00:29:12Z | 2020-10-20T00:29:12Z | 2020-10-20T05:47:18Z |
DOC: macOS is the new name for Mac OS | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 73820f2d5ad65..7fbd2e1188901 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -206,7 +206,7 @@ You will need `Build Tools for Visual Studio 2017
scrolling down to "All downloads" -> "Tools for Visual Studio 2019".
In the installer, select the "C++ build tools" workload.
-**Mac OS**
+**macOS**
Information about compiler installation can be found here:
https://devguide.python.org/setup/#macos
@@ -299,7 +299,7 @@ Creating a Python environment (pip)
If you aren't using conda for your development environment, follow these instructions.
You'll need to have at least Python 3.6.1 installed on your system.
-**Unix**/**Mac OS with virtualenv**
+**Unix**/**macOS with virtualenv**
.. code-block:: bash
@@ -318,7 +318,7 @@ You'll need to have at least Python 3.6.1 installed on your system.
python setup.py build_ext --inplace -j 4
python -m pip install -e . --no-build-isolation --no-use-pep517
-**Unix**/**Mac OS with pyenv**
+**Unix**/**macOS with pyenv**
Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37214 | 2020-10-18T01:55:15Z | 2020-10-18T15:00:14Z | 2020-10-18T15:00:14Z | 2022-07-15T23:39:34Z |
TYP: core.window | diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index 0505913aaf8cc..94875ba86db65 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -139,8 +139,8 @@ def aggregate(self, func, *args, **kwargs):
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
- def count(self, **kwargs):
- return super().count(**kwargs)
+ def count(self):
+ return super().count()
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
diff --git a/setup.cfg b/setup.cfg
index 9f8776262268a..8c86a723bbb59 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -196,15 +196,6 @@ check_untyped_defs=False
[mypy-pandas.core.reshape.merge]
check_untyped_defs=False
-[mypy-pandas.core.window.common]
-check_untyped_defs=False
-
-[mypy-pandas.core.window.expanding]
-check_untyped_defs=False
-
-[mypy-pandas.core.window.rolling]
-check_untyped_defs=False
-
[mypy-pandas.io.clipboard]
check_untyped_defs=False
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37212 | 2020-10-18T00:50:59Z | 2020-10-18T14:57:00Z | 2020-10-18T14:57:00Z | 2020-10-18T17:49:26Z |
DOC: add value counts as related method to count | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 539275c7ff617..8730595204aa6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8518,6 +8518,7 @@ def count(self, axis=0, level=None, numeric_only=False):
See Also
--------
Series.count: Number of non-NA elements in a Series.
+ DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
| Counting values using `.value_counts()`, more generally, is a counting task, so it felt natural to link the `.count()` page to `.value_counts()`.
I copied the description for `.value_counts()` from the "See also" box in [`.drop_duplicates()`](https://github.com/pandas-dev/pandas/blob/9fed16cd4c302e47383480361260d63dc23cbefc/pandas/core/frame.py#L5102).
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37209 | 2020-10-17T21:51:22Z | 2020-10-22T23:54:09Z | 2020-10-22T23:54:09Z | 2020-11-09T05:09:36Z |
BUG: Join did not work correctly when one MultiIndex had only one level | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 393866b92771b..497ac68b70019 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -296,6 +296,7 @@ Groupby/resample/rolling
Reshaping
^^^^^^^^^
- Bug in :meth:`DataFrame.unstack` with missing levels led to incorrect index names (:issue:`37510`)
+- Bug in :func:`join` over :class:`MultiIndex` returned wrong result, when one of both indexes had only one level (:issue:`36909`)
- Bug in :func:`concat` incorrectly casting to ``object`` dtype in some cases when one or more of the operands is empty (:issue:`38843`, :issue:`38907`)
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2db803e5c1b19..98a5fe52f8473 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1674,7 +1674,7 @@ def _drop_level_numbers(self, levnums: List[int]):
Drop MultiIndex levels by level _number_, not name.
"""
- if not levnums:
+ if not levnums and not isinstance(self, ABCMultiIndex):
return self
if len(levnums) >= self.nlevels:
raise ValueError(
diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py
index f7b1bc4729428..dcb592287733c 100644
--- a/pandas/tests/indexes/multi/test_drop.py
+++ b/pandas/tests/indexes/multi/test_drop.py
@@ -180,3 +180,11 @@ def test_single_level_drop_partially_missing_elements():
msg = r"labels \['a'\] not found in level"
with pytest.raises(KeyError, match=msg):
mi.drop([np.nan, 1, "a"], level=0)
+
+
+def test_droplevel_multiindex_one_level():
+ # GH#37208
+ index = pd.MultiIndex.from_tuples([(2,)], names=("b",))
+ result = index.droplevel([])
+ expected = pd.Int64Index([2], name="b")
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index ad07ced2fca66..f64283147764f 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -815,3 +815,19 @@ def test_join_cross(input_col, output_cols):
result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y")
expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
tm.assert_frame_equal(result, expected)
+
+
+def test_join_multiindex_one_level(join_type):
+ # GH#36909
+ left = DataFrame(
+ data={"c": 3}, index=pd.MultiIndex.from_tuples([(1, 2)], names=("a", "b"))
+ )
+ right = DataFrame(
+ data={"d": 4}, index=pd.MultiIndex.from_tuples([(2,)], names=("b",))
+ )
+ result = left.join(right, how=join_type)
+ expected = DataFrame(
+ {"c": [3], "d": [4]},
+ index=pd.MultiIndex.from_tuples([(2, 1)], names=["b", "a"]),
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #36909
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
``droplevel`` for ``MultiIndex`` returns an ``Index`` when the resulting ``MultiIndex`` had only one level. But if the input had only one level and no level should be dropped, the return was a ``MultiIndex``. This did not seem consistent, so I changed it, that in this case an Index would be returned too. If this is not the desired behavior, we could fix the ``join`` problems after calling this functions.
If this is not desired, we should add a note to the docstring, that the return for ``droplevel`` is a MultiIndex, if no level is dropped.
| https://api.github.com/repos/pandas-dev/pandas/pulls/37208 | 2020-10-17T21:51:17Z | 2021-01-03T22:22:53Z | 2021-01-03T22:22:53Z | 2021-01-03T22:26:06Z |
ENH: Support closed for fixed windows in rolling | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index b24020848b363..b9f0683697ba6 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -652,9 +652,9 @@ parameter:
:header: "``closed``", "Description", "Default for"
:widths: 20, 30, 30
- ``right``, close right endpoint, time-based windows
+ ``right``, close right endpoint,
``left``, close left endpoint,
- ``both``, close both endpoints, fixed windows
+ ``both``, close both endpoints,
``neither``, open endpoints,
For example, having the right endpoint open is useful in many problems that require that there is no contamination
@@ -681,9 +681,6 @@ from present information back to past information. This allows the rolling windo
df
-Currently, this feature is only implemented for time-based windows.
-For fixed windows, the closed parameter cannot be set and the rolling window will always have both endpoints closed.
-
.. _stats.iter_rolling_window:
Iteration over window:
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 28c86015fb7b6..99d2a1ee27265 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -222,6 +222,7 @@ Other enhancements
- :meth:`DataFrame.plot` now recognizes ``xlabel`` and ``ylabel`` arguments for plots of type ``scatter`` and ``hexbin`` (:issue:`37001`)
- :class:`DataFrame` now supports ``divmod`` operation (:issue:`37165`)
- :meth:`DataFrame.to_parquet` now returns a ``bytes`` object when no ``path`` argument is passed (:issue:`37105`)
+- :class:`Rolling` now supports the ``closed`` argument for fixed windows (:issue:`34315`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index 9af1159a805ec..6a49a5bb34855 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -43,16 +43,14 @@ def calculate_variable_window_bounds(
(ndarray[int64], ndarray[int64])
"""
cdef:
- bint left_closed = False
- bint right_closed = False
- int index_growth_sign = 1
+ bint left_closed = False, right_closed = False
ndarray[int64_t, ndim=1] start, end
- int64_t start_bound, end_bound
+ int64_t start_bound, end_bound, index_growth_sign = 1
Py_ssize_t i, j
- # if windows is variable, default is 'right', otherwise default is 'both'
+ # default is 'right'
if closed is None:
- closed = 'right' if index is not None else 'both'
+ closed = 'right'
if closed in ['right', 'both']:
right_closed = True
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 71e77f97d8797..a8229257bb7bb 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -85,6 +85,10 @@ def get_window_bounds(
end = np.arange(1 + offset, num_values + 1 + offset, dtype="int64")
start = end - self.window_size
+ if closed in ["left", "both"]:
+ start -= 1
+ if closed in ["left", "neither"]:
+ end -= 1
end = np.clip(end, 0, num_values)
start = np.clip(start, 0, num_values)
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 1fcc47931e882..9136f9398799b 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -850,10 +850,11 @@ class Window(BaseWindow):
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
- 'neither' endpoints.
- For offset-based windows, it defaults to 'right'.
- For fixed windows, defaults to 'both'. Remaining cases not implemented
- for fixed windows.
+ 'neither' endpoints. Defaults to 'right'.
+
+ .. versionchanged:: 1.2.0
+
+ The closed parameter with fixed windows is now supported.
Returns
-------
@@ -1976,11 +1977,6 @@ def validate(self):
elif self.window < 0:
raise ValueError("window must be non-negative")
- if not self.is_datetimelike and self.closed is not None:
- raise ValueError(
- "closed only implemented for datetimelike and offset based windows"
- )
-
def _determine_window_length(self) -> Union[int, float]:
"""
Calculate freq for PeriodIndexes based on Index freq. Can not use
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 048f7b8287176..9bba6d084f9c9 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -122,14 +122,37 @@ def test_numpy_compat(method):
getattr(r, method)(dtype=np.float64)
-def test_closed():
- df = DataFrame({"A": [0, 1, 2, 3, 4]})
- # closed only allowed for datetimelike
+@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
+def test_closed_fixed(closed, arithmetic_win_operators):
+ # GH 34315
+ func_name = arithmetic_win_operators
+ df_fixed = DataFrame({"A": [0, 1, 2, 3, 4]})
+ df_time = DataFrame({"A": [0, 1, 2, 3, 4]}, index=date_range("2020", periods=5))
- msg = "closed only implemented for datetimelike and offset based windows"
+ result = getattr(df_fixed.rolling(2, closed=closed, min_periods=1), func_name)()
+ expected = getattr(df_time.rolling("2D", closed=closed), func_name)().reset_index(
+ drop=True
+ )
- with pytest.raises(ValueError, match=msg):
- df.rolling(window=3, closed="neither")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_closed_fixed_binary_col():
+ # GH 34315
+ data = [0, 1, 1, 0, 0, 1, 0, 1]
+ df = DataFrame(
+ {"binary_col": data},
+ index=pd.date_range(start="2020-01-01", freq="min", periods=len(data)),
+ )
+
+ rolling = df.rolling(window=len(df), closed="left", min_periods=1)
+ result = rolling.mean()
+ expected = DataFrame(
+ [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571],
+ columns=["binary_col"],
+ index=pd.date_range(start="2020-01-01", freq="min", periods=len(data)),
+ )
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("closed", ["neither", "left"])
| - [x] closes #34315
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37207 | 2020-10-17T20:45:53Z | 2020-10-22T00:21:11Z | 2020-10-22T00:21:11Z | 2020-10-22T04:41:30Z |
DOC: remove debug messages by adding semicolons | diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 96d9fc6077325..c4ee8677a6b0d 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -64,7 +64,7 @@ On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the column
plt.figure();
@savefig frame_plot_basic.png
- df.plot()
+ df.plot();
You can plot one column versus another using the ``x`` and ``y`` keywords in
:meth:`~DataFrame.plot`:
@@ -119,7 +119,7 @@ For example, a bar plot can be created the following way:
plt.figure();
@savefig bar_plot_ex.png
- df.iloc[5].plot(kind="bar")
+ df.iloc[5].plot(kind="bar");
You can also create these other plots using the methods ``DataFrame.plot.<kind>`` instead of providing the ``kind`` keyword argument. This makes it easier to discover plot methods and the specific arguments they use:
@@ -180,7 +180,7 @@ bar plot:
df2 = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
@savefig bar_plot_multi_ex.png
- df2.plot.bar()
+ df2.plot.bar();
To produce a stacked bar plot, pass ``stacked=True``:
@@ -193,7 +193,7 @@ To produce a stacked bar plot, pass ``stacked=True``:
.. ipython:: python
@savefig bar_plot_stacked_ex.png
- df2.plot.bar(stacked=True)
+ df2.plot.bar(stacked=True);
To get horizontal bar plots, use the ``barh`` method:
@@ -206,7 +206,7 @@ To get horizontal bar plots, use the ``barh`` method:
.. ipython:: python
@savefig barh_plot_stacked_ex.png
- df2.plot.barh(stacked=True)
+ df2.plot.barh(stacked=True);
.. _visualization.hist:
@@ -414,7 +414,7 @@ groupings. For instance,
df = pd.DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
- plt.figure()
+ plt.figure();
@savefig box_plot_ex2.png
bp = df.boxplot(by="X")
@@ -518,7 +518,7 @@ When input data contains ``NaN``, it will be automatically filled by 0. If you w
df = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
@savefig area_plot_stacked.png
- df.plot.area()
+ df.plot.area();
To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified:
@@ -531,7 +531,7 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5
.. ipython:: python
@savefig area_plot_unstacked.png
- df.plot.area(stacked=False)
+ df.plot.area(stacked=False);
.. _visualization.scatter:
@@ -554,7 +554,7 @@ These can be specified by the ``x`` and ``y`` keywords.
df = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
@savefig scatter_plot.png
- df.plot.scatter(x="a", y="b")
+ df.plot.scatter(x="a", y="b");
To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
It is recommended to specify ``color`` and ``label`` keywords to distinguish each groups.
@@ -563,7 +563,7 @@ It is recommended to specify ``color`` and ``label`` keywords to distinguish eac
ax = df.plot.scatter(x="a", y="b", color="DarkBlue", label="Group 1")
@savefig scatter_plot_repeated.png
- df.plot.scatter(x="c", y="d", color="DarkGreen", label="Group 2", ax=ax)
+ df.plot.scatter(x="c", y="d", color="DarkGreen", label="Group 2", ax=ax);
.. ipython:: python
:suppress:
@@ -576,7 +576,7 @@ each point:
.. ipython:: python
@savefig scatter_plot_colored.png
- df.plot.scatter(x="a", y="b", c="c", s=50)
+ df.plot.scatter(x="a", y="b", c="c", s=50);
.. ipython:: python
@@ -591,7 +591,7 @@ bubble chart using a column of the ``DataFrame`` as the bubble size.
.. ipython:: python
@savefig scatter_plot_bubble.png
- df.plot.scatter(x="a", y="b", s=df["c"] * 200)
+ df.plot.scatter(x="a", y="b", s=df["c"] * 200);
.. ipython:: python
:suppress:
@@ -837,7 +837,7 @@ You can create a scatter plot matrix using the
df = pd.DataFrame(np.random.randn(1000, 4), columns=["a", "b", "c", "d"])
@savefig scatter_matrix_kde.png
- scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal="kde")
+ scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal="kde");
.. ipython:: python
:suppress:
@@ -1086,7 +1086,7 @@ layout and formatting of the returned plot:
plt.figure();
@savefig series_plot_basic2.png
- ts.plot(style="k--", label="Series")
+ ts.plot(style="k--", label="Series");
.. ipython:: python
:suppress:
@@ -1144,7 +1144,7 @@ it empty for ylabel.
df.plot();
@savefig plot_xlabel_ylabel.png
- df.plot(xlabel="new x", ylabel="new y")
+ df.plot(xlabel="new x", ylabel="new y");
.. ipython:: python
:suppress:
@@ -1320,7 +1320,7 @@ with the ``subplots`` keyword:
.. ipython:: python
@savefig frame_plot_subplots.png
- df.plot(subplots=True, figsize=(6, 6))
+ df.plot(subplots=True, figsize=(6, 6));
.. ipython:: python
:suppress:
@@ -1343,7 +1343,7 @@ or columns needed, given the other.
.. ipython:: python
@savefig frame_plot_subplots_layout.png
- df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False)
+ df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False);
.. ipython:: python
:suppress:
@@ -1354,7 +1354,7 @@ The above example is identical to using:
.. ipython:: python
- df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False)
+ df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False);
.. ipython:: python
:suppress:
@@ -1379,9 +1379,9 @@ otherwise you will see a warning.
target1 = [axes[0][0], axes[1][1], axes[2][2], axes[3][3]]
target2 = [axes[3][0], axes[2][1], axes[1][2], axes[0][3]]
- df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False)
+ df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False);
@savefig frame_plot_subplots_multi_ax.png
- (-df).plot(subplots=True, ax=target2, legend=False, sharex=False, sharey=False)
+ (-df).plot(subplots=True, ax=target2, legend=False, sharex=False, sharey=False);
.. ipython:: python
:suppress:
@@ -1409,15 +1409,15 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
fig, axes = plt.subplots(nrows=2, ncols=2)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
- df["A"].plot(ax=axes[0, 0])
- axes[0, 0].set_title("A")
- df["B"].plot(ax=axes[0, 1])
- axes[0, 1].set_title("B")
- df["C"].plot(ax=axes[1, 0])
- axes[1, 0].set_title("C")
- df["D"].plot(ax=axes[1, 1])
+ df["A"].plot(ax=axes[0, 0]);
+ axes[0, 0].set_title("A");
+ df["B"].plot(ax=axes[0, 1]);
+ axes[0, 1].set_title("B");
+ df["C"].plot(ax=axes[1, 0]);
+ axes[1, 0].set_title("C");
+ df["D"].plot(ax=axes[1, 1]);
@savefig series_plot_multi.png
- axes[1, 1].set_title("D")
+ axes[1, 1].set_title("D");
.. ipython:: python
:suppress:
| remove last few remaining debug messages that slipped through the cracks by adding semicolons after plot functions
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37206 | 2020-10-17T20:42:33Z | 2020-10-18T14:54:30Z | 2020-10-18T14:54:30Z | 2022-07-15T23:39:49Z |
BUG: Don't copy DataFrame columns as metadata | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index d08e8e009811a..54ed605bf87e4 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -483,6 +483,7 @@ Other
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`)
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`)
+- Fixed bug in metadata propagation incorrectly copying DataFrame columns as metadata when the column name overlaps with the metadata name (:issue:`37037`)
- Fixed metadata propagation in the :class:`Series.dt` and :class:`Series.str` accessors (:issue:`28283`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError``, from a bare ``Exception`` previously (:issue:`35744`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3079bb0b79b33..48044831d7ad6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5346,7 +5346,7 @@ def __finalize__(
self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels
# For subclasses using _metadata.
- for name in self._metadata:
+ for name in set(self._metadata) & set(other._metadata):
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 25c926b1de4c6..7be5837dac158 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -772,3 +772,11 @@ def test_groupby(obj, method):
obj.attrs = {"a": 1}
result = method(obj.groupby([0, 0]))
assert result.attrs == {"a": 1}
+
+
+def test_finalize_frame_series_name():
+ # https://github.com/pandas-dev/pandas/pull/37186/files#r506978889
+ # ensure we don't copy the column `name` to the Series.
+ df = pd.DataFrame({"name": [1, 2]})
+ result = pd.Series([1, 2]).__finalize__(df)
+ assert result.name is None
| See https://github.com/pandas-dev/pandas/pull/37186/files#r506978889.
Basically, we don't want to accidentally do `series.name =
dataframe.name` and try to set a column.
cc @jorisvandenbossche. This could conceivably affect geopandas, since geodataframe has different metadata than geoseries: https://github.com/geopandas/geopandas/blob/924cdf65c7c15b01749d1cdd036c5c291e87b0f4/geopandas/geodataframe.py#L83
But I don't think it'll actually break anything for you. | https://api.github.com/repos/pandas-dev/pandas/pulls/37205 | 2020-10-17T20:14:42Z | 2020-10-20T00:26:53Z | 2020-10-20T00:26:53Z | 2020-10-28T11:33:02Z |
ENH: Support all Scipy window types in rolling(..., win_type) | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index b9f0683697ba6..45d15f29fcce8 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -451,6 +451,10 @@ The list of recognized types are the `scipy.signal window functions
* ``slepian`` (needs width)
* ``exponential`` (needs tau).
+.. versionadded:: 1.2.0
+
+All Scipy window types, concurrent with your installed version, are recognized ``win_types``.
+
.. ipython:: python
ser = pd.Series(np.random.randn(10), index=pd.date_range("1/1/2000", periods=10))
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 83614d7a9628b..c6edf4eb0e88e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -226,6 +226,7 @@ Other enhancements
- :meth:`DataFrame.to_parquet` now returns a ``bytes`` object when no ``path`` argument is passed (:issue:`37105`)
- :class:`Rolling` now supports the ``closed`` argument for fixed windows (:issue:`34315`)
- :class:`DatetimeIndex` and :class:`Series` with ``datetime64`` or ``datetime64tz`` dtypes now support ``std`` (:issue:`37436`)
+- :class:`Window` now supports all Scipy window types in ``win_type`` with flexible keyword argument support (:issue:`34556`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index bfc31021a8f87..a976350a419fe 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -873,30 +873,14 @@ class Window(BaseWindow):
To learn more about the offsets & frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
- The recognized win_types are:
-
- * ``boxcar``
- * ``triang``
- * ``blackman``
- * ``hamming``
- * ``bartlett``
- * ``parzen``
- * ``bohman``
- * ``blackmanharris``
- * ``nuttall``
- * ``barthann``
- * ``kaiser`` (needs parameter: beta)
- * ``gaussian`` (needs parameter: std)
- * ``general_gaussian`` (needs parameters: power, width)
- * ``slepian`` (needs parameter: width)
- * ``exponential`` (needs parameter: tau), center is set to None.
-
- If ``win_type=None`` all points are evenly weighted. To learn more about
- different window types see `scipy.signal window functions
+ If ``win_type=None``, all points are evenly weighted; otherwise, ``win_type``
+ can accept a string of any `scipy.signal window function
<https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
- Certain window types require additional parameters to be passed. Please see
- the third example below on how to add the additional parameters.
+ Certain Scipy window types require additional parameters to be passed
+ in the aggregation function. The additional parameters must match
+ the keywords specified in the Scipy window type method signature.
+ Please see the third example below on how to add the additional parameters.
Examples
--------
@@ -1000,71 +984,22 @@ def _constructor(self):
def validate(self):
super().validate()
- window = self.window
- if isinstance(window, BaseIndexer):
+ if isinstance(self.window, BaseIndexer):
raise NotImplementedError(
"BaseIndexer subclasses not implemented with win_types."
)
- elif isinstance(window, (list, tuple, np.ndarray)):
- pass
- elif is_integer(window):
- if window <= 0:
+ elif is_integer(self.window):
+ if self.window <= 0:
raise ValueError("window must be > 0 ")
- import_optional_dependency(
- "scipy", extra="Scipy is required to generate window weight."
+ sig = import_optional_dependency(
+ "scipy.signal", extra="Scipy is required to generate window weight."
)
- import scipy.signal as sig
-
if not isinstance(self.win_type, str):
raise ValueError(f"Invalid win_type {self.win_type}")
if getattr(sig, self.win_type, None) is None:
raise ValueError(f"Invalid win_type {self.win_type}")
else:
- raise ValueError(f"Invalid window {window}")
-
- def _get_win_type(self, kwargs: Dict[str, Any]) -> Union[str, Tuple]:
- """
- Extract arguments for the window type, provide validation for it
- and return the validated window type.
-
- Parameters
- ----------
- kwargs : dict
-
- Returns
- -------
- win_type : str, or tuple
- """
- # the below may pop from kwargs
- def _validate_win_type(win_type, kwargs):
- arg_map = {
- "kaiser": ["beta"],
- "gaussian": ["std"],
- "general_gaussian": ["power", "width"],
- "slepian": ["width"],
- "exponential": ["tau"],
- }
-
- if win_type in arg_map:
- win_args = _pop_args(win_type, arg_map[win_type], kwargs)
- if win_type == "exponential":
- # exponential window requires the first arg (center)
- # to be set to None (necessary for symmetric window)
- win_args.insert(0, None)
-
- return tuple([win_type] + win_args)
-
- return win_type
-
- def _pop_args(win_type, arg_names, kwargs):
- all_args = []
- for n in arg_names:
- if n not in kwargs:
- raise ValueError(f"{win_type} window requires {n}")
- all_args.append(kwargs.pop(n))
- return all_args
-
- return _validate_win_type(self.win_type, kwargs)
+ raise ValueError(f"Invalid window {self.window}")
def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray:
"""
@@ -1079,31 +1014,6 @@ def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray:
result = np.copy(result[tuple(lead_indexer)])
return result
- def _get_window_weights(
- self, win_type: Optional[Union[str, Tuple]] = None
- ) -> np.ndarray:
- """
- Get the window, weights.
-
- Parameters
- ----------
- win_type : str, or tuple
- type of window to create
-
- Returns
- -------
- window : ndarray
- the window, weights
- """
- window = self.window
- if isinstance(window, (list, tuple, np.ndarray)):
- return com.asarray_tuplesafe(window).astype(float)
- elif is_integer(window):
- import scipy.signal as sig
-
- # GH #15662. `False` makes symmetric window, rather than periodic.
- return sig.get_window(win_type, window, False).astype(float)
-
def _apply(
self,
func: Callable[[np.ndarray, int, int], np.ndarray],
@@ -1124,14 +1034,17 @@ def _apply(
whether to cache a numba compiled function. Only available for numba
enabled methods (so far only apply)
**kwargs
- additional arguments for rolling function and window function
+ additional arguments for scipy windows if necessary
Returns
-------
y : type of input
"""
- win_type = self._get_win_type(kwargs)
- window = self._get_window_weights(win_type=win_type)
+ signal = import_optional_dependency(
+ "scipy.signal", extra="Scipy is required to generate window weight."
+ )
+ assert self.win_type is not None # for mypy
+ window = getattr(signal, self.win_type)(self.window, **kwargs)
offset = (len(window) - 1) // 2 if self.center else 0
def homogeneous_func(values: np.ndarray):
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 2f622c2bc3e60..39b3a9a630760 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -431,7 +431,7 @@ def test_cmov_window_special(win_types_special):
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
- "general_gaussian": {"power": 2.0, "width": 2.0},
+ "general_gaussian": {"p": 2.0, "sig": 2.0},
"exponential": {"tau": 10},
}
@@ -503,7 +503,7 @@ def test_cmov_window_special_linear_range(win_types_special):
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
- "general_gaussian": {"power": 2.0, "width": 2.0},
+ "general_gaussian": {"p": 2.0, "sig": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py
index a3fff3122f80a..eab62b3383283 100644
--- a/pandas/tests/window/test_window.py
+++ b/pandas/tests/window/test_window.py
@@ -6,7 +6,6 @@
import pandas as pd
from pandas import Series
-from pandas.core.window import Window
@td.skip_if_no_scipy
@@ -50,7 +49,7 @@ def test_constructor_with_win_type(which, win_types):
@pytest.mark.parametrize("method", ["sum", "mean"])
def test_numpy_compat(method):
# see gh-12811
- w = Window(Series([2, 4, 6]), window=[0, 2])
+ w = Series([2, 4, 6]).rolling(window=2)
msg = "numpy operations are not valid with window objects"
@@ -75,3 +74,11 @@ def test_agg_function_support(arg):
with pytest.raises(AttributeError, match=msg):
roll.agg({"A": arg})
+
+
+@td.skip_if_no_scipy
+def test_invalid_scipy_arg():
+ # This error is raised by scipy
+ msg = r"boxcar\(\) got an unexpected"
+ with pytest.raises(TypeError, match=msg):
+ Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")
| - [x] closes #34556
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Additionally cleans up some helper methods | https://api.github.com/repos/pandas-dev/pandas/pulls/37204 | 2020-10-17T18:49:01Z | 2020-10-31T15:34:38Z | 2020-10-31T15:34:38Z | 2020-10-31T22:57:52Z |
CLN: clean color selection in _matplotlib/style | diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index b919728971505..b2c7b2610845c 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -1,4 +1,14 @@
-# being a bit too dynamic
+from typing import (
+ TYPE_CHECKING,
+ Collection,
+ Dict,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Union,
+ cast,
+)
import warnings
import matplotlib.cm as cm
@@ -9,92 +19,256 @@
import pandas.core.common as com
+if TYPE_CHECKING:
+ from matplotlib.colors import Colormap
+
+
+Color = Union[str, Sequence[float]]
+
def get_standard_colors(
- num_colors: int, colormap=None, color_type: str = "default", color=None
+ num_colors: int,
+ colormap: Optional["Colormap"] = None,
+ color_type: str = "default",
+ color: Optional[Union[Dict[str, Color], Color, Collection[Color]]] = None,
):
- import matplotlib.pyplot as plt
+ """
+ Get standard colors based on `colormap`, `color_type` or `color` inputs.
+
+ Parameters
+ ----------
+ num_colors : int
+ Minimum number of colors to be returned.
+ Ignored if `color` is a dictionary.
+ colormap : :py:class:`matplotlib.colors.Colormap`, optional
+ Matplotlib colormap.
+ When provided, the resulting colors will be derived from the colormap.
+ color_type : {"default", "random"}, optional
+ Type of colors to derive. Used if provided `color` and `colormap` are None.
+ Ignored if either `color` or `colormap` are not None.
+ color : dict or str or sequence, optional
+ Color(s) to be used for deriving sequence of colors.
+ Can be either be a dictionary, or a single color (single color string,
+ or sequence of floats representing a single color),
+ or a sequence of colors.
+
+ Returns
+ -------
+ dict or list
+ Standard colors. Can either be a mapping if `color` was a dictionary,
+ or a list of colors with a length of `num_colors` or more.
+
+ Warns
+ -----
+ UserWarning
+ If both `colormap` and `color` are provided.
+ Parameter `color` will override.
+ """
+ if isinstance(color, dict):
+ return color
+
+ colors = _derive_colors(
+ color=color,
+ colormap=colormap,
+ color_type=color_type,
+ num_colors=num_colors,
+ )
+
+ return _cycle_colors(colors, num_colors=num_colors)
+
+
+def _derive_colors(
+ *,
+ color: Optional[Union[Color, Collection[Color]]],
+ colormap: Optional[Union[str, "Colormap"]],
+ color_type: str,
+ num_colors: int,
+) -> List[Color]:
+ """
+ Derive colors from either `colormap`, `color_type` or `color` inputs.
+
+ Get a list of colors either from `colormap`, or from `color`,
+ or from `color_type` (if both `colormap` and `color` are None).
+
+ Parameters
+ ----------
+ color : str or sequence, optional
+ Color(s) to be used for deriving sequence of colors.
+ Can be either be a single color (single color string, or sequence of floats
+ representing a single color), or a sequence of colors.
+ colormap : :py:class:`matplotlib.colors.Colormap`, optional
+ Matplotlib colormap.
+ When provided, the resulting colors will be derived from the colormap.
+ color_type : {"default", "random"}, optional
+ Type of colors to derive. Used if provided `color` and `colormap` are None.
+ Ignored if either `color` or `colormap`` are not None.
+ num_colors : int
+ Number of colors to be extracted.
+ Returns
+ -------
+ list
+ List of colors extracted.
+
+ Warns
+ -----
+ UserWarning
+ If both `colormap` and `color` are provided.
+ Parameter `color` will override.
+ """
if color is None and colormap is not None:
- if isinstance(colormap, str):
- cmap = colormap
- colormap = cm.get_cmap(colormap)
- if colormap is None:
- raise ValueError(f"Colormap {cmap} is not recognized")
- colors = [colormap(num) for num in np.linspace(0, 1, num=num_colors)]
+ return _get_colors_from_colormap(colormap, num_colors=num_colors)
elif color is not None:
if colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
- colors = (
- list(color)
- if is_list_like(color) and not isinstance(color, dict)
- else color
- )
+ return _get_colors_from_color(color)
else:
- if color_type == "default":
- # need to call list() on the result to copy so we don't
- # modify the global rcParams below
- try:
- colors = [c["color"] for c in list(plt.rcParams["axes.prop_cycle"])]
- except KeyError:
- colors = list(plt.rcParams.get("axes.color_cycle", list("bgrcmyk")))
- if isinstance(colors, str):
- colors = list(colors)
-
- colors = colors[0:num_colors]
- elif color_type == "random":
-
- def random_color(column):
- """ Returns a random color represented as a list of length 3"""
- # GH17525 use common._random_state to avoid resetting the seed
- rs = com.random_state(column)
- return rs.rand(3).tolist()
-
- colors = [random_color(num) for num in range(num_colors)]
- else:
- raise ValueError("color_type must be either 'default' or 'random'")
+ return _get_colors_from_color_type(color_type, num_colors=num_colors)
- if isinstance(colors, str) and _is_single_color(colors):
- # GH #36972
- colors = [colors]
- # Append more colors by cycling if there is not enough color.
- # Extra colors will be ignored by matplotlib if there are more colors
- # than needed and nothing needs to be done here.
+def _cycle_colors(colors: List[Color], num_colors: int) -> List[Color]:
+ """Append more colors by cycling if there is not enough color.
+
+ Extra colors will be ignored by matplotlib if there are more colors
+ than needed and nothing needs to be done here.
+ """
if len(colors) < num_colors:
- try:
- multiple = num_colors // len(colors) - 1
- except ZeroDivisionError:
- raise ValueError("Invalid color argument: ''")
+ multiple = num_colors // len(colors) - 1
mod = num_colors % len(colors)
-
colors += multiple * colors
colors += colors[:mod]
return colors
-def _is_single_color(color: str) -> bool:
- """Check if ``color`` is a single color.
+def _get_colors_from_colormap(
+ colormap: Union[str, "Colormap"],
+ num_colors: int,
+) -> List[Color]:
+ """Get colors from colormap."""
+ colormap = _get_cmap_instance(colormap)
+ return [colormap(num) for num in np.linspace(0, 1, num=num_colors)]
+
+
+def _get_cmap_instance(colormap: Union[str, "Colormap"]) -> "Colormap":
+ """Get instance of matplotlib colormap."""
+ if isinstance(colormap, str):
+ cmap = colormap
+ colormap = cm.get_cmap(colormap)
+ if colormap is None:
+ raise ValueError(f"Colormap {cmap} is not recognized")
+ return colormap
+
+
+def _get_colors_from_color(
+ color: Union[Color, Collection[Color]],
+) -> List[Color]:
+ """Get colors from user input color."""
+ if len(color) == 0:
+ raise ValueError(f"Invalid color argument: {color}")
+
+ if _is_single_color(color):
+ color = cast(Color, color)
+ return [color]
+
+ color = cast(Collection[Color], color)
+ return list(_gen_list_of_colors_from_iterable(color))
+
+
+def _is_single_color(color: Union[Color, Collection[Color]]) -> bool:
+ """Check if `color` is a single color, not a sequence of colors.
+
+ Single color is of these kinds:
+ - Named color "red", "C0", "firebrick"
+ - Alias "g"
+ - Sequence of floats, such as (0.1, 0.2, 0.3) or (0.1, 0.2, 0.3, 0.4).
+
+ See Also
+ --------
+ _is_single_string_color
+ """
+ if isinstance(color, str) and _is_single_string_color(color):
+ # GH #36972
+ return True
+
+ if _is_floats_color(color):
+ return True
+
+ return False
+
+
+def _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]:
+ """
+ Yield colors from string of several letters or from collection of colors.
+ """
+ for x in color:
+ if _is_single_color(x):
+ yield x
+ else:
+ raise ValueError(f"Invalid color {x}")
+
+
+def _is_floats_color(color: Union[Color, Collection[Color]]) -> bool:
+ """Check if color comprises a sequence of floats representing color."""
+ return bool(
+ is_list_like(color)
+ and (len(color) == 3 or len(color) == 4)
+ and all(isinstance(x, (int, float)) for x in color)
+ )
+
+
+def _get_colors_from_color_type(color_type: str, num_colors: int) -> List[Color]:
+ """Get colors from user input color type."""
+ if color_type == "default":
+ return _get_default_colors(num_colors)
+ elif color_type == "random":
+ return _get_random_colors(num_colors)
+ else:
+ raise ValueError("color_type must be either 'default' or 'random'")
+
+
+def _get_default_colors(num_colors: int) -> List[Color]:
+ """Get `num_colors` of default colors from matplotlib rc params."""
+ import matplotlib.pyplot as plt
+
+ colors = [c["color"] for c in plt.rcParams["axes.prop_cycle"]]
+ return colors[0:num_colors]
+
+
+def _get_random_colors(num_colors: int) -> List[Color]:
+ """Get `num_colors` of random colors."""
+ return [_random_color(num) for num in range(num_colors)]
+
+
+def _random_color(column: int) -> List[float]:
+ """Get a random color represented as a list of length 3"""
+ # GH17525 use common._random_state to avoid resetting the seed
+ rs = com.random_state(column)
+ return rs.rand(3).tolist()
+
+
+def _is_single_string_color(color: Color) -> bool:
+ """Check if `color` is a single string color.
- Examples of single colors:
+ Examples of single string colors:
- 'r'
- 'g'
- 'red'
- 'green'
- 'C3'
+ - 'firebrick'
Parameters
----------
- color : string
- Color string.
+ color : Color
+ Color string or sequence of floats.
Returns
-------
bool
- True if ``color`` looks like a valid color.
+ True if `color` looks like a valid color.
False otherwise.
"""
conv = matplotlib.colors.ColorConverter()
diff --git a/pandas/tests/plotting/test_style.py b/pandas/tests/plotting/test_style.py
new file mode 100644
index 0000000000000..665bda15724fd
--- /dev/null
+++ b/pandas/tests/plotting/test_style.py
@@ -0,0 +1,157 @@
+import pytest
+
+from pandas import Series
+
+pytest.importorskip("matplotlib")
+from pandas.plotting._matplotlib.style import get_standard_colors
+
+
+class TestGetStandardColors:
+ @pytest.mark.parametrize(
+ "num_colors, expected",
+ [
+ (3, ["red", "green", "blue"]),
+ (5, ["red", "green", "blue", "red", "green"]),
+ (7, ["red", "green", "blue", "red", "green", "blue", "red"]),
+ (2, ["red", "green"]),
+ (1, ["red"]),
+ ],
+ )
+ def test_default_colors_named_from_prop_cycle(self, num_colors, expected):
+ import matplotlib as mpl
+ from matplotlib.pyplot import cycler
+
+ mpl_params = {
+ "axes.prop_cycle": cycler(color=["red", "green", "blue"]),
+ }
+ with mpl.rc_context(rc=mpl_params):
+ result = get_standard_colors(num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "num_colors, expected",
+ [
+ (1, ["b"]),
+ (3, ["b", "g", "r"]),
+ (4, ["b", "g", "r", "y"]),
+ (5, ["b", "g", "r", "y", "b"]),
+ (7, ["b", "g", "r", "y", "b", "g", "r"]),
+ ],
+ )
+ def test_default_colors_named_from_prop_cycle_string(self, num_colors, expected):
+ import matplotlib as mpl
+ from matplotlib.pyplot import cycler
+
+ mpl_params = {
+ "axes.prop_cycle": cycler(color="bgry"),
+ }
+ with mpl.rc_context(rc=mpl_params):
+ result = get_standard_colors(num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "num_colors, expected_name",
+ [
+ (1, ["C0"]),
+ (3, ["C0", "C1", "C2"]),
+ (
+ 12,
+ [
+ "C0",
+ "C1",
+ "C2",
+ "C3",
+ "C4",
+ "C5",
+ "C6",
+ "C7",
+ "C8",
+ "C9",
+ "C0",
+ "C1",
+ ],
+ ),
+ ],
+ )
+ def test_default_colors_named_undefined_prop_cycle(self, num_colors, expected_name):
+ import matplotlib as mpl
+ import matplotlib.colors as mcolors
+
+ with mpl.rc_context(rc={}):
+ expected = [mcolors.to_hex(x) for x in expected_name]
+ result = get_standard_colors(num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "num_colors, expected",
+ [
+ (1, ["red", "green", (0.1, 0.2, 0.3)]),
+ (2, ["red", "green", (0.1, 0.2, 0.3)]),
+ (3, ["red", "green", (0.1, 0.2, 0.3)]),
+ (4, ["red", "green", (0.1, 0.2, 0.3), "red"]),
+ ],
+ )
+ def test_user_input_color_sequence(self, num_colors, expected):
+ color = ["red", "green", (0.1, 0.2, 0.3)]
+ result = get_standard_colors(color=color, num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "num_colors, expected",
+ [
+ (1, ["r", "g", "b", "k"]),
+ (2, ["r", "g", "b", "k"]),
+ (3, ["r", "g", "b", "k"]),
+ (4, ["r", "g", "b", "k"]),
+ (5, ["r", "g", "b", "k", "r"]),
+ (6, ["r", "g", "b", "k", "r", "g"]),
+ ],
+ )
+ def test_user_input_color_string(self, num_colors, expected):
+ color = "rgbk"
+ result = get_standard_colors(color=color, num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "num_colors, expected",
+ [
+ (1, [(0.1, 0.2, 0.3)]),
+ (2, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
+ (3, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
+ ],
+ )
+ def test_user_input_color_floats(self, num_colors, expected):
+ color = (0.1, 0.2, 0.3)
+ result = get_standard_colors(color=color, num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "color, num_colors, expected",
+ [
+ ("Crimson", 1, ["Crimson"]),
+ ("DodgerBlue", 2, ["DodgerBlue", "DodgerBlue"]),
+ ("firebrick", 3, ["firebrick", "firebrick", "firebrick"]),
+ ],
+ )
+ def test_user_input_named_color_string(self, color, num_colors, expected):
+ result = get_standard_colors(color=color, num_colors=num_colors)
+ assert result == expected
+
+ @pytest.mark.parametrize("color", ["", [], (), Series([], dtype="object")])
+ def test_empty_color_raises(self, color):
+ with pytest.raises(ValueError, match="Invalid color argument"):
+ get_standard_colors(color=color, num_colors=1)
+
+ @pytest.mark.parametrize(
+ "color",
+ [
+ "bad_color",
+ ("red", "green", "bad_color"),
+ (0.1,),
+ (0.1, 0.2),
+ (0.1, 0.2, 0.3, 0.4, 0.5), # must be either 3 or 4 floats
+ ],
+ )
+ def test_bad_color_raises(self, color):
+ with pytest.raises(ValueError, match="Invalid color"):
+ get_standard_colors(color=color, num_colors=5)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
There was a comment in the top of the module that the code was a bit too dynamic.
The logic in the function was too complex.
I refactored the code
- Extract functions
- Simplify logic
- Add type annotations
Note: there is one issue with mypy, which I do not know how to handle. Currently I suggested to ignore the error. | https://api.github.com/repos/pandas-dev/pandas/pulls/37203 | 2020-10-17T18:18:43Z | 2020-11-04T02:18:05Z | 2020-11-04T02:18:05Z | 2020-11-04T02:18:10Z |
DOC: update io-excel tag | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index d4be9d802d697..3a81b060cd73a 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -23,7 +23,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>`
text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>`
text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>`
- ;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
+ binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
binary;`OpenDocument <http://www.opendocumentformat.org>`__;:ref:`read_excel<io.ods>`;
binary;`HDF5 Format <https://support.hdfgroup.org/HDF5/whatishdf5.html>`__;:ref:`read_hdf<io.hdf5>`;:ref:`to_hdf<io.hdf5>`
binary;`Feather Format <https://github.com/wesm/feather>`__;:ref:`read_feather<io.feather>`;:ref:`to_feather<io.feather>`
| - [x] closes #37200
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37202 | 2020-10-17T17:14:41Z | 2020-10-17T18:36:33Z | 2020-10-17T18:36:33Z | 2020-10-17T18:36:37Z |
Backport PR #37181 on branch 1.1.x (BUG: Fix isin with read-only target) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6892fb62028c9..d5b6abd9f9de4 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -28,6 +28,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
- Bug causing ``groupby(...).sum()`` and similar to not preserve metadata (:issue:`29442`)
+- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` raising a ``ValueError`` when the target was read-only (:issue:`37174`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index 0cc0a6b192df5..558963c38851e 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -208,7 +208,7 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'):
{{if dtype == 'object'}}
def ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values):
{{else}}
-def ismember_{{dtype}}(const {{c_type}}[:] arr, {{c_type}}[:] values):
+def ismember_{{dtype}}(const {{c_type}}[:] arr, const {{c_type}}[:] values):
{{endif}}
"""
Return boolean of values in arr on an
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
index 35d45bd00131b..29a3a0106c56c 100644
--- a/pandas/tests/frame/methods/test_isin.py
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -204,3 +204,12 @@ def test_isin_category_frame(self, values):
result = df.isin(values)
tm.assert_frame_equal(result, expected)
+
+ def test_isin_read_only(self):
+ # https://github.com/pandas-dev/pandas/issues/37174
+ arr = np.array([1, 2, 3])
+ arr.setflags(write=False)
+ df = DataFrame([1, 2, 3])
+ result = df.isin(arr)
+ expected = DataFrame([True, True, True])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index 3836c1d56bf87..62766c692f4df 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -80,3 +80,12 @@ def test_isin_empty(self, empty):
result = s.isin(empty)
tm.assert_series_equal(expected, result)
+
+ def test_isin_read_only(self):
+ # https://github.com/pandas-dev/pandas/issues/37174
+ arr = np.array([1, 2, 3])
+ arr.setflags(write=False)
+ s = Series([1, 2, 3])
+ result = s.isin(arr)
+ expected = Series([True, True, True])
+ tm.assert_series_equal(result, expected)
| Backport PR #37181: BUG: Fix isin with read-only target | https://api.github.com/repos/pandas-dev/pandas/pulls/37201 | 2020-10-17T17:04:54Z | 2020-10-17T18:56:41Z | 2020-10-17T18:56:41Z | 2020-10-17T18:56:41Z |
BUG: Non deterministic level order in MultiIndex with join | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index c9a1dbd0ae90d..e841818c8ae05 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -474,6 +474,7 @@ Reshaping
- Bug in func :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`)
- Bug in :meth:`DataFrame.agg` with ``func={'name':<FUNC>}`` incorrectly raising ``TypeError`` when ``DataFrame.columns==['Name']`` (:issue:`36212`)
- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`)
+- Bug in :func:`join` returned a non deterministic level-order for the resulting :class:`MultiIndex` (:issue:`36910`)
-
Sparse
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 87dd15d5b142b..be65e59abad3e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3581,8 +3581,12 @@ def _join_multi(self, other, how, return_indexers=True):
from pandas.core.reshape.merge import restore_dropped_levels_multijoin
# figure out join names
- self_names = set(com.not_none(*self.names))
- other_names = set(com.not_none(*other.names))
+ self_names_list = list(com.not_none(*self.names))
+ other_names_list = list(com.not_none(*other.names))
+ self_names_order = self_names_list.index
+ other_names_order = other_names_list.index
+ self_names = set(self_names_list)
+ other_names = set(other_names_list)
overlap = self_names & other_names
# need at least 1 in common
@@ -3592,8 +3596,8 @@ def _join_multi(self, other, how, return_indexers=True):
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
# Drop the non-matching levels from left and right respectively
- ldrop_names = list(self_names - overlap)
- rdrop_names = list(other_names - overlap)
+ ldrop_names = sorted(self_names - overlap, key=self_names_order)
+ rdrop_names = sorted(other_names - overlap, key=other_names_order)
# if only the order differs
if not len(ldrop_names + rdrop_names):
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index d4d4c4190417e..3c0cb0426ae16 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -879,3 +879,20 @@ def _join_by_hand(a, b, how="left"):
for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
+
+
+def test_join_inner_multiindex_deterministic_order():
+ # GH: 36910
+ left = pd.DataFrame(
+ data={"e": 5},
+ index=pd.MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")),
+ )
+ right = pd.DataFrame(
+ data={"f": 6}, index=pd.MultiIndex.from_tuples([(2, 3)], names=("b", "c"))
+ )
+ result = left.join(right, how="inner")
+ expected = pd.DataFrame(
+ {"e": [5], "f": [6]},
+ index=pd.MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")),
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #36910
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This fixes the issue, if the number should be deterministic. If not, we should add a note in the docs | https://api.github.com/repos/pandas-dev/pandas/pulls/37199 | 2020-10-17T16:24:09Z | 2020-10-23T00:20:33Z | 2020-10-23T00:20:33Z | 2020-10-25T19:19:14Z |
BUG: Regression in Resample.apply raised error when apply affected only a Series | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6892fb62028c9..1abaf28e14af0 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`)
- Fixed regression in :meth:`Series.astype` converting ``None`` to ``"nan"`` when casting to string (:issue:`36904`)
- Fixed regression in :class:`RollingGroupby` causing a segmentation fault with Index of dtype object (:issue:`36727`)
+- Fixed regression in :meth:`DataFrame.resample(...).apply(...)` raised ``AttributeError`` when input was a :class:`DataFrame` and only a :class:`Series` was evaluated (:issue:`36951`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 3f1b1dac080a7..caef938272f6f 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -369,8 +369,9 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
result = grouped._aggregate_item_by_item(how, *args, **kwargs)
else:
result = grouped.aggregate(how, *args, **kwargs)
- except DataError:
+ except (DataError, AttributeError, KeyError):
# we have a non-reducing function; try to evaluate
+ # alternatively we want to evaluate only a column of the input
result = grouped.apply(how, *args, **kwargs)
except ValueError as err:
if "Must produce aggregated value" in str(err):
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 73bf7dafac254..2f22be7c8cce9 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -347,3 +347,18 @@ def test_median_duplicate_columns():
result = df.resample("5s").median()
expected.columns = result.columns
tm.assert_frame_equal(result, expected)
+
+
+def test_apply_to_one_column_of_df():
+ # GH: 36951
+ df = pd.DataFrame(
+ {"col": range(10), "col1": range(10, 20)},
+ index=pd.date_range("2012-01-01", periods=10, freq="20min"),
+ )
+ result = df.resample("H").apply(lambda group: group.col.sum())
+ expected = pd.Series(
+ [3, 12, 21, 9], index=pd.date_range("2012-01-01", periods=4, freq="H")
+ )
+ tm.assert_series_equal(result, expected)
+ result = df.resample("H").apply(lambda group: group["col"].sum())
+ tm.assert_series_equal(result, expected)
| - [x] closes #36951
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Error was no longer caught as before.
Sorry made a mistake when pushing... | https://api.github.com/repos/pandas-dev/pandas/pulls/37198 | 2020-10-17T16:14:41Z | 2020-10-20T01:30:23Z | 2020-10-20T01:30:23Z | 2020-10-21T10:46:18Z |
BUG: JoinUnit.is_na wrong for CategoricalDtype | diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 02c9a76b70cd3..8559fe72972b8 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -217,9 +217,7 @@ def is_na(self) -> bool:
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
values = self.block.values
- if self.block.is_categorical:
- values_flat = values.categories
- elif is_sparse(self.block.values.dtype):
+ if is_sparse(self.block.values.dtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
| - [x] closes #20833
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
JoinUnit.is_na is basically checking `isna(self.block.values).all()`. The check for is_categorical is an attempted optimization bc values.categories is often much smaller than values. But Categorical represents its NAs in its codes, not in its categories. So this will incorrectly always return False in the status quo.
Having trouble coming up with a useful test. I can adapt a test from test_concat that returns an incorrect answer from is_na, but that does not appear to affect the result of the higher-level pd.concat call. | https://api.github.com/repos/pandas-dev/pandas/pulls/37196 | 2020-10-17T15:38:41Z | 2020-10-20T01:28:50Z | 2020-10-20T01:28:50Z | 2020-10-20T01:48:35Z |
CLN: parametrize test_nat_comparisons | diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index cefd2ae7a9ddb..b0b8f1345e4d3 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -173,7 +173,26 @@ class TestDatetime64SeriesComparison:
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
- def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
+ @pytest.mark.parametrize(
+ "op, expected",
+ [
+ (operator.eq, Series([False, False, True])),
+ (operator.ne, Series([True, True, False])),
+ (operator.lt, Series([False, False, False])),
+ (operator.gt, Series([False, False, False])),
+ (operator.ge, Series([False, False, True])),
+ (operator.le, Series([False, False, True])),
+ ],
+ )
+ def test_nat_comparisons(
+ self,
+ dtype,
+ index_or_series,
+ reverse,
+ pair,
+ op,
+ expected,
+ ):
box = index_or_series
l, r = pair
if reverse:
@@ -182,25 +201,10 @@ def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
- # Series, Index
- expected = Series([False, False, True])
- tm.assert_series_equal(left == right, expected)
+ result = op(left, right)
- expected = Series([True, True, False])
- tm.assert_series_equal(left != right, expected)
-
- expected = Series([False, False, False])
- tm.assert_series_equal(left < right, expected)
-
- expected = Series([False, False, False])
- tm.assert_series_equal(left > right, expected)
-
- expected = Series([False, False, True])
- tm.assert_series_equal(left >= right, expected)
-
- expected = Series([False, False, True])
- tm.assert_series_equal(left <= right, expected)
+ tm.assert_series_equal(result, expected)
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
# GH#4968
| Parametrize ``TestDatetime64SeriesComparison.test_nat_comparisons``
using ``operator`` module.
If this kind of cleanup is considered reasonable,
then I will go through the remaining tests in this test module.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37195 | 2020-10-17T15:35:50Z | 2020-11-04T17:25:27Z | 2020-11-04T17:25:27Z | 2020-11-06T15:36:53Z |
BUG: .loc with MultiIndex with names[1] = 0 | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index e4d97168692b3..8f023e2003231 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -430,6 +430,7 @@ Indexing
- Bug in indexing with boolean masks on datetime-like values sometimes returning a view instead of a copy (:issue:`36210`)
- Bug in :meth:`DataFrame.__getitem__` and :meth:`DataFrame.loc.__getitem__` with :class:`IntervalIndex` columns and a numeric indexer (:issue:`26490`)
- Bug in :meth:`Series.loc.__getitem__` with a non-unique :class:`MultiIndex` and an empty-list indexer (:issue:`13691`)
+- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` with a level named "0" (:issue:`37194`)
Missing
^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d658d799f1fb8..adb63323c4e25 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3684,7 +3684,9 @@ class animal locomotion
index = self.index
if isinstance(index, MultiIndex):
try:
- loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
+ loc, new_index = self.index._get_loc_level(
+ key, level=0, drop_level=drop_level
+ )
except TypeError as e:
raise TypeError(f"Expected label or tuple of labels, got {key}") from e
else:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f336eec8c4cce..2aa1d9978173c 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1552,12 +1552,19 @@ def droplevel(self, level=0):
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
- if len(level) == 0:
+ return self._drop_level_numbers(levnums)
+
+ def _drop_level_numbers(self, levnums: List[int]):
+ """
+ Drop MultiIndex levels by level _number_, not name.
+ """
+
+ if len(levnums) == 0:
return self
- if len(level) >= self.nlevels:
+ if len(levnums) >= self.nlevels:
raise ValueError(
- f"Cannot remove {len(level)} levels from an index with {self.nlevels} "
- "levels: at least one level must be left."
+ f"Cannot remove {len(levnums)} levels from an index with "
+ f"{self.nlevels} levels: at least one level must be left."
)
# The two checks above guarantee that here self is a MultiIndex
self = cast("MultiIndex", self)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4ba7dc58a3527..380df22861218 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2864,16 +2864,29 @@ def get_loc_level(self, key, level=0, drop_level: bool = True):
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
+ if not isinstance(level, (list, tuple)):
+ level = self._get_level_number(level)
+ else:
+ level = [self._get_level_number(lev) for lev in level]
+ return self._get_loc_level(key, level=level, drop_level=drop_level)
+
+ def _get_loc_level(
+ self, key, level: Union[int, List[int]] = 0, drop_level: bool = True
+ ):
+ """
+ get_loc_level but with `level` known to be positional, not name-based.
+ """
+
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
- levels = [self._get_level_number(i) for i in levels]
+
for i in sorted(levels, reverse=True):
try:
- new_index = new_index.droplevel(i)
+ new_index = new_index._drop_level_numbers([i])
except ValueError:
# no dropping here
@@ -2887,7 +2900,7 @@ def maybe_mi_droplevels(indexer, levels, drop_level: bool):
)
result = None
for lev, k in zip(level, key):
- loc, new_index = self.get_loc_level(k, level=lev)
+ loc, new_index = self._get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
@@ -2897,8 +2910,6 @@ def maybe_mi_droplevels(indexer, levels, drop_level: bool):
return result, maybe_mi_droplevels(result, level, drop_level)
- level = self._get_level_number(level)
-
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
@@ -2963,7 +2974,8 @@ def partial_selection(key, indexer=None):
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
- def _get_level_indexer(self, key, level=0, indexer=None):
+ def _get_level_indexer(self, key, level: int = 0, indexer=None):
+ # `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
@@ -3767,13 +3779,13 @@ def maybe_droplevels(index, key):
if isinstance(key, tuple):
for _ in key:
try:
- index = index.droplevel(0)
+ index = index._drop_level_numbers([0])
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
- index = index.droplevel(0)
+ index = index._drop_level_numbers([0])
except ValueError:
pass
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 03046f51d668a..0e466b49f6597 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -524,6 +524,30 @@ def test_loc_with_mi_indexer():
tm.assert_frame_equal(result, expected)
+def test_loc_mi_with_level1_named_0():
+ # GH#37194
+ dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
+
+ ser = Series(range(3), index=dti)
+ df = ser.to_frame()
+ df[1] = dti
+
+ df2 = df.set_index(0, append=True)
+ assert df2.index.names == (None, 0)
+ df2.index.get_loc(dti[0]) # smoke test
+
+ result = df2.loc[dti[0]]
+ expected = df2.iloc[[0]].droplevel(None)
+ tm.assert_frame_equal(result, expected)
+
+ ser2 = df2[1]
+ assert ser2.index.names == (None, 0)
+
+ result = ser2.loc[dti[0]]
+ expected = ser2.iloc[[0]].droplevel(None)
+ tm.assert_series_equal(result, expected)
+
+
def test_getitem_str_slice(datapath):
# GH#15928
path = datapath("reshape", "merge", "data", "quotes2.csv")
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37194 | 2020-10-17T15:05:46Z | 2020-10-23T00:17:00Z | 2020-10-23T00:17:00Z | 2020-10-23T00:54:28Z |
BUG: record warnings related to DatetimeIndex | diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 78aa1887f5611..66463a4a2358a 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -291,9 +291,16 @@ def test_irreg_hf(self):
_, ax = self.plt.subplots()
df2 = df.copy()
df2.index = df.index.astype(object)
- df2.plot(ax=ax)
- diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
- assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # This warning will be emitted
+ # pandas/core/frame.py:3216:
+ # FutureWarning: Automatically casting object-dtype Index of datetimes
+ # to DatetimeIndex is deprecated and will be removed in a future version.
+ # Explicitly cast to DatetimeIndex instead.
+ # return klass(values, index=self.index, name=name, fastpath=True)
+ df2.plot(ax=ax)
+ diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
+ assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
ser = tm.makeTimeSeries()
@@ -1028,9 +1035,16 @@ def test_irreg_dtypes(self):
# np.datetime64
idx = date_range("1/1/2000", periods=10)
idx = idx[[0, 2, 5, 9]].astype(object)
- df = DataFrame(np.random.randn(len(idx), 3), idx)
- _, ax = self.plt.subplots()
- _check_plot_works(df.plot, ax=ax)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # This warning will be emitted
+ # pandas/core/frame.py:3216:
+ # FutureWarning: Automatically casting object-dtype Index of datetimes
+ # to DatetimeIndex is deprecated and will be removed in a future version.
+ # Explicitly cast to DatetimeIndex instead.
+ # return klass(values, index=self.index, name=name, fastpath=True)
+ df = DataFrame(np.random.randn(len(idx), 3), idx)
+ _, ax = self.plt.subplots()
+ _check_plot_works(df.plot, ax=ax)
@pytest.mark.slow
def test_time(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This PR handles warnings emitted when running tests in ``pandas/tests/plotting/test_datetimelike.py``.
Warnings looked like this:
```
pandas/tests/plotting/test_datetimelike.py::TestTSPlot::test_irreg_dtypes
/workspaces/pandas/pandas/core/frame.py:3216: FutureWarning: Automatically casting object-dtype Index of datetimes to DatetimeIndex is deprecated and will be removed in a future version. Explicitly cast to DatetimeIndex instead.
return klass(values, index=self.index, name=name, fastpath=True)
```
Presumably it started after implementing this: https://github.com/pandas-dev/pandas/pull/36697/
The reason for not using ``tm.assert_produces_warning`` is that when I use it,
then I get an error that the warning is raised with the incorrect stacklevel. | https://api.github.com/repos/pandas-dev/pandas/pulls/37193 | 2020-10-17T14:22:59Z | 2020-10-23T00:18:07Z | 2020-10-23T00:18:07Z | 2020-11-06T15:32:27Z |
DOC: fix doc isna same as notna | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 784e8877ef128..c3fd79a2644aa 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7309,7 +7309,7 @@ def isna(self: FrameOrSeries) -> FrameOrSeries:
-------
{klass}
Mask of bool values for each element in {klass} that
- indicates whether an element is not an NA value.
+ indicates whether an element is an NA value.
See Also
--------
| - [x] closes #37175
| https://api.github.com/repos/pandas-dev/pandas/pulls/37192 | 2020-10-17T13:04:26Z | 2020-10-17T16:00:54Z | 2020-10-17T16:00:53Z | 2020-10-17T16:01:50Z |
Backport PR #37144 on branch 1.1.x: BLD: update build requirement for py39 #37135 | diff --git a/pyproject.toml b/pyproject.toml
index 098a38958b5cc..5b3c3fd598b2f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,10 +7,11 @@ requires = [
"Cython>=0.29.21,<3", # Note: sync with setup.py
"numpy==1.15.4; python_version=='3.6' and platform_system!='AIX'",
"numpy==1.15.4; python_version=='3.7' and platform_system!='AIX'",
- "numpy==1.17.3; python_version>='3.8' and platform_system!='AIX'",
+ "numpy==1.17.3; python_version=='3.8' and platform_system!='AIX'",
"numpy==1.16.0; python_version=='3.6' and platform_system=='AIX'",
"numpy==1.16.0; python_version=='3.7' and platform_system=='AIX'",
- "numpy==1.17.3; python_version>='3.8' and platform_system=='AIX'",
+ "numpy==1.17.3; python_version=='3.8' and platform_system=='AIX'",
+ "numpy; python_version>='3.9'",
]
[tool.black]
| Backport PR #37144 on branch 1.1.x | https://api.github.com/repos/pandas-dev/pandas/pulls/37189 | 2020-10-17T11:27:04Z | 2020-10-17T13:27:53Z | 2020-10-17T13:27:53Z | 2020-10-17T13:27:57Z |
CI: Check for inconsistent pandas namespace usage | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index a50a71bbbad63..ab44598e04440 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -37,6 +37,12 @@ function invgrep {
return $((! $EXIT_STATUS))
}
+function check_namespace {
+ local -r CLASS="${1}"
+ grep -R -l --include "*.py" " ${CLASS}(" pandas/tests | xargs grep -n "pd\.${CLASS}("
+ test $? -gt 0
+}
+
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
FLAKE8_FORMAT="##[error]%(path)s:%(row)s:%(col)s:%(code)s:%(text)s"
INVGREP_PREPEND="##[error]"
@@ -195,6 +201,11 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
MSG='Check code for instances of os.remove' ; echo $MSG
invgrep -R --include="*.py*" --exclude "common.py" --exclude "test_writers.py" --exclude "test_store.py" -E "os\.remove" pandas/tests/
RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+ MSG='Check for inconsistent use of pandas namespace in tests' ; echo $MSG
+ check_namespace "Series"
+ RET=$(($RET + $?))
+ echo $MSG "DONE"
fi
### CODE ###
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index c0ae36017f47a..f9dd4a7445a99 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -140,11 +140,11 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
xbox = box if box not in [pd.Index, pd.array] else np.ndarray
ts = pd.Timestamp.now(tz)
- ser = pd.Series([ts, pd.NaT])
+ ser = Series([ts, pd.NaT])
obj = tm.box_expected(ser, box)
- expected = pd.Series([True, False], dtype=np.bool_)
+ expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
@@ -278,7 +278,7 @@ def test_series_comparison_scalars(self, val):
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
- ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
+ ser = Series(pd.date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = pd.Timestamp("nat")
@@ -313,7 +313,7 @@ def test_dt64arr_timestamp_equality(self, box_with_array):
box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
)
- ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
+ ser = Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
ser = tm.box_expected(ser, box_with_array)
result = ser != ser
@@ -973,7 +973,7 @@ def test_dt64arr_sub_timestamp(self, box_with_array):
ser = tm.box_expected(ser, box_with_array)
- delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
+ delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
@@ -985,7 +985,7 @@ def test_dt64arr_sub_NaT(self, box_with_array):
ser = tm.box_expected(dti, box_with_array)
result = ser - pd.NaT
- expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
+ expected = Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
@@ -993,7 +993,7 @@ def test_dt64arr_sub_NaT(self, box_with_array):
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - pd.NaT
- expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
+ expected = Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
@@ -1606,7 +1606,7 @@ def test_dt64_series_arith_overflow(self):
dt = pd.Timestamp("1700-01-31")
td = pd.Timedelta("20000 Days")
dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
- ser = pd.Series(dti)
+ ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
@@ -1618,7 +1618,7 @@ def test_dt64_series_arith_overflow(self):
td + ser
ser.iloc[-1] = pd.NaT
- expected = pd.Series(
+ expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
@@ -1627,9 +1627,7 @@ def test_dt64_series_arith_overflow(self):
tm.assert_series_equal(res, expected)
ser.iloc[1:] = pd.NaT
- expected = pd.Series(
- ["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
- )
+ expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
@@ -1830,8 +1828,8 @@ def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
- ser = pd.Series(dti)
- expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))
+ ser = Series(dti)
+ expected = Series(pd.TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py
index 03cc4fe2bdcb5..30a23d8563ef8 100644
--- a/pandas/tests/arithmetic/test_interval.py
+++ b/pandas/tests/arithmetic/test_interval.py
@@ -290,6 +290,6 @@ def test_index_series_compat(self, op, constructor, expected_type, assert_func):
def test_comparison_operations(self, scalars):
# GH #28981
expected = Series([False, False])
- s = pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")
+ s = Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")
result = s == scalars
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 8b108a2f1a2b3..9716cffd626a8 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -42,7 +42,7 @@ def adjust_negative_zero(zero, expected):
# TODO: remove this kludge once mypy stops giving false positives here
# List comprehension has incompatible type List[PandasObject]; expected List[RangeIndex]
# See GH#29725
-ser_or_index: List[Any] = [pd.Series, pd.Index]
+ser_or_index: List[Any] = [Series, pd.Index]
lefts: List[Any] = [pd.RangeIndex(10, 40, 10)]
lefts.extend(
[
@@ -59,14 +59,14 @@ def adjust_negative_zero(zero, expected):
class TestNumericComparisons:
def test_operator_series_comparison_zerorank(self):
# GH#13006
- result = np.float64(0) > pd.Series([1, 2, 3])
- expected = 0.0 > pd.Series([1, 2, 3])
+ result = np.float64(0) > Series([1, 2, 3])
+ expected = 0.0 > Series([1, 2, 3])
tm.assert_series_equal(result, expected)
- result = pd.Series([1, 2, 3]) < np.float64(0)
- expected = pd.Series([1, 2, 3]) < 0.0
+ result = Series([1, 2, 3]) < np.float64(0)
+ expected = Series([1, 2, 3]) < 0.0
tm.assert_series_equal(result, expected)
- result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
- expected = 0.0 > pd.Series([1, 2, 3])
+ result = np.array([0, 1, 2])[0] > Series([0, 1, 2])
+ expected = 0.0 > Series([1, 2, 3])
tm.assert_series_equal(result, expected)
def test_df_numeric_cmp_dt64_raises(self):
@@ -92,8 +92,8 @@ def test_df_numeric_cmp_dt64_raises(self):
def test_compare_invalid(self):
# GH#8058
# ops testing
- a = pd.Series(np.random.randn(5), name=0)
- b = pd.Series(np.random.randn(5))
+ a = Series(np.random.randn(5), name=0)
+ b = Series(np.random.randn(5))
b.name = pd.Timestamp("2000-01-01")
tm.assert_series_equal(a / b, 1 / (b / a))
@@ -102,12 +102,12 @@ def test_numeric_cmp_string_numexpr_path(self, box_with_array):
box = box_with_array
xbox = box if box is not pd.Index else np.ndarray
- obj = pd.Series(np.random.randn(10 ** 5))
+ obj = Series(np.random.randn(10 ** 5))
obj = tm.box_expected(obj, box, transpose=False)
result = obj == "a"
- expected = pd.Series(np.zeros(10 ** 5, dtype=bool))
+ expected = Series(np.zeros(10 ** 5, dtype=bool))
expected = tm.box_expected(expected, xbox, transpose=False)
tm.assert_equal(result, expected)
@@ -126,7 +126,7 @@ def test_numeric_cmp_string_numexpr_path(self, box_with_array):
class TestNumericArraylikeArithmeticWithDatetimeLike:
# TODO: also check name retentention
- @pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series])
+ @pytest.mark.parametrize("box_cls", [np.array, pd.Index, Series])
@pytest.mark.parametrize(
"left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype)
)
@@ -136,8 +136,8 @@ def test_mul_td64arr(self, left, box_cls):
right = box_cls(right)
expected = pd.TimedeltaIndex(["10s", "40s", "90s"])
- if isinstance(left, pd.Series) or box_cls is pd.Series:
- expected = pd.Series(expected)
+ if isinstance(left, Series) or box_cls is Series:
+ expected = Series(expected)
result = left * right
tm.assert_equal(result, expected)
@@ -146,7 +146,7 @@ def test_mul_td64arr(self, left, box_cls):
tm.assert_equal(result, expected)
# TODO: also check name retentention
- @pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series])
+ @pytest.mark.parametrize("box_cls", [np.array, pd.Index, Series])
@pytest.mark.parametrize(
"left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype)
)
@@ -156,8 +156,8 @@ def test_div_td64arr(self, left, box_cls):
right = box_cls(right)
expected = pd.TimedeltaIndex(["1s", "2s", "3s"])
- if isinstance(left, pd.Series) or box_cls is pd.Series:
- expected = pd.Series(expected)
+ if isinstance(left, Series) or box_cls is Series:
+ expected = Series(expected)
result = right / left
tm.assert_equal(result, expected)
@@ -402,8 +402,8 @@ def test_ser_div_ser(self, dtype1, any_real_dtype):
def test_ser_divmod_zero(self, dtype1, any_real_dtype):
# GH#26987
dtype2 = any_real_dtype
- left = pd.Series([1, 1]).astype(dtype1)
- right = pd.Series([0, 2]).astype(dtype2)
+ left = Series([1, 1]).astype(dtype1)
+ right = Series([0, 2]).astype(dtype2)
# GH#27321 pandas convention is to set 1 // 0 to np.inf, as opposed
# to numpy which sets to np.nan; patch `expected[0]` below
@@ -422,8 +422,8 @@ def test_ser_divmod_zero(self, dtype1, any_real_dtype):
tm.assert_series_equal(result[1], expected[1])
def test_ser_divmod_inf(self):
- left = pd.Series([np.inf, 1.0])
- right = pd.Series([np.inf, 2.0])
+ left = Series([np.inf, 1.0])
+ right = Series([np.inf, 2.0])
expected = left // right, left % right
result = divmod(left, right)
@@ -480,8 +480,8 @@ def test_df_div_zero_df(self):
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
result = df / df
- first = pd.Series([1.0, 1.0, 1.0, 1.0])
- second = pd.Series([np.nan, np.nan, np.nan, 1])
+ first = Series([1.0, 1.0, 1.0, 1.0])
+ second = Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({"first": first, "second": second})
tm.assert_frame_equal(result, expected)
@@ -489,8 +489,8 @@ def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
- first = pd.Series([1.0, 1.0, 1.0, 1.0])
- second = pd.Series([np.nan, np.nan, np.nan, 1])
+ first = Series([1.0, 1.0, 1.0, 1.0])
+ second = Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({"first": first, "second": second})
with np.errstate(all="ignore"):
@@ -530,8 +530,8 @@ def test_df_mod_zero_df(self):
# this is technically wrong, as the integer portion is coerced to float
# ###
- first = pd.Series([0, 0, 0, 0], dtype="float64")
- second = pd.Series([np.nan, np.nan, np.nan, 0])
+ first = Series([0, 0, 0, 0], dtype="float64")
+ second = Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({"first": first, "second": second})
result = df % df
tm.assert_frame_equal(result, expected)
@@ -542,8 +542,8 @@ def test_df_mod_zero_array(self):
# this is technically wrong, as the integer portion is coerced to float
# ###
- first = pd.Series([0, 0, 0, 0], dtype="float64")
- second = pd.Series([np.nan, np.nan, np.nan, 0])
+ first = Series([0, 0, 0, 0], dtype="float64")
+ second = Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({"first": first, "second": second})
# numpy has a slightly different (wrong) treatment
@@ -812,14 +812,14 @@ class TestAdditionSubtraction:
"first, second, expected",
[
(
- pd.Series([1, 2, 3], index=list("ABC"), name="x"),
- pd.Series([2, 2, 2], index=list("ABD"), name="x"),
- pd.Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"),
+ Series([1, 2, 3], index=list("ABC"), name="x"),
+ Series([2, 2, 2], index=list("ABD"), name="x"),
+ Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"),
),
(
- pd.Series([1, 2, 3], index=list("ABC"), name="x"),
- pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x"),
- pd.Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"),
+ Series([1, 2, 3], index=list("ABC"), name="x"),
+ Series([2, 2, 2, 2], index=list("ABCD"), name="x"),
+ Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"),
),
],
)
@@ -851,7 +851,7 @@ def test_add_frames(self, first, second, expected):
# TODO: This came from series.test.test_operators, needs cleanup
def test_series_frame_radd_bug(self):
# GH#353
- vals = pd.Series(tm.rands_array(5, 10))
+ vals = Series(tm.rands_array(5, 10))
result = "foo_" + vals
expected = vals.map(lambda x: "foo_" + x)
tm.assert_series_equal(result, expected)
@@ -876,14 +876,14 @@ def test_series_frame_radd_bug(self):
# TODO: This came from series.test.test_operators, needs cleanup
def test_datetime64_with_index(self):
# arithmetic integer ops with an index
- ser = pd.Series(np.random.randn(5))
+ ser = Series(np.random.randn(5))
expected = ser - ser.index.to_series()
result = ser - ser.index
tm.assert_series_equal(result, expected)
# GH#4629
# arithmetic datetime64 ops with an index
- ser = pd.Series(
+ ser = Series(
pd.date_range("20130101", periods=5),
index=pd.date_range("20130101", periods=5),
)
@@ -910,7 +910,7 @@ def test_frame_operators(self, float_frame):
frame2 = pd.DataFrame(float_frame, columns=["D", "C", "B", "A"])
garbage = np.random.random(4)
- colSeries = pd.Series(garbage, index=np.array(frame.columns))
+ colSeries = Series(garbage, index=np.array(frame.columns))
idSum = frame + frame
seriesSum = frame + colSeries
@@ -1033,8 +1033,8 @@ def test_series_divmod_zero(self):
other = tser * 0
result = divmod(tser, other)
- exp1 = pd.Series([np.inf] * len(tser), index=tser.index, name="ts")
- exp2 = pd.Series([np.nan] * len(tser), index=tser.index, name="ts")
+ exp1 = Series([np.inf] * len(tser), index=tser.index, name="ts")
+ exp2 = Series([np.nan] * len(tser), index=tser.index, name="ts")
tm.assert_series_equal(result[0], exp1)
tm.assert_series_equal(result[1], exp2)
@@ -1042,10 +1042,10 @@ def test_series_divmod_zero(self):
class TestUFuncCompat:
@pytest.mark.parametrize(
"holder",
- [pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.RangeIndex, pd.Series],
+ [pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.RangeIndex, Series],
)
def test_ufunc_compat(self, holder):
- box = pd.Series if holder is pd.Series else pd.Index
+ box = Series if holder is Series else pd.Index
if holder is pd.RangeIndex:
idx = pd.RangeIndex(0, 5)
@@ -1056,11 +1056,11 @@ def test_ufunc_compat(self, holder):
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
- "holder", [pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.Series]
+ "holder", [pd.Int64Index, pd.UInt64Index, pd.Float64Index, Series]
)
def test_ufunc_coercions(self, holder):
idx = holder([1, 2, 3, 4, 5], name="x")
- box = pd.Series if holder is pd.Series else pd.Index
+ box = Series if holder is Series else pd.Index
result = np.sqrt(idx)
assert result.dtype == "f8" and isinstance(result, box)
@@ -1100,11 +1100,11 @@ def test_ufunc_coercions(self, holder):
tm.assert_equal(result, exp)
@pytest.mark.parametrize(
- "holder", [pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.Series]
+ "holder", [pd.Int64Index, pd.UInt64Index, pd.Float64Index, Series]
)
def test_ufunc_multiple_return_values(self, holder):
obj = holder([1, 2, 3], name="x")
- box = pd.Series if holder is pd.Series else pd.Index
+ box = Series if holder is Series else pd.Index
result = np.modf(obj)
assert isinstance(result, tuple)
@@ -1114,9 +1114,9 @@ def test_ufunc_multiple_return_values(self, holder):
tm.assert_equal(result[1], tm.box_expected(exp2, box))
def test_ufunc_at(self):
- s = pd.Series([0, 1, 2], index=[1, 2, 3], name="x")
+ s = Series([0, 1, 2], index=[1, 2, 3], name="x")
np.add.at(s, [0, 2], 10)
- expected = pd.Series([10, 1, 12], index=[1, 2, 3], name="x")
+ expected = Series([10, 1, 12], index=[1, 2, 3], name="x")
tm.assert_series_equal(s, expected)
@@ -1126,8 +1126,8 @@ class TestObjectDtypeEquivalence:
@pytest.mark.parametrize("dtype", [None, object])
def test_numarr_with_dtype_add_nan(self, dtype, box_with_array):
box = box_with_array
- ser = pd.Series([1, 2, 3], dtype=dtype)
- expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
+ ser = Series([1, 2, 3], dtype=dtype)
+ expected = Series([np.nan, np.nan, np.nan], dtype=dtype)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
@@ -1141,8 +1141,8 @@ def test_numarr_with_dtype_add_nan(self, dtype, box_with_array):
@pytest.mark.parametrize("dtype", [None, object])
def test_numarr_with_dtype_add_int(self, dtype, box_with_array):
box = box_with_array
- ser = pd.Series([1, 2, 3], dtype=dtype)
- expected = pd.Series([2, 3, 4], dtype=dtype)
+ ser = Series([1, 2, 3], dtype=dtype)
+ expected = Series([2, 3, 4], dtype=dtype)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
@@ -1160,7 +1160,7 @@ def test_numarr_with_dtype_add_int(self, dtype, box_with_array):
)
def test_operators_reverse_object(self, op):
# GH#56
- arr = pd.Series(np.random.randn(10), index=np.arange(10), dtype=object)
+ arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
result = op(1.0, arr)
expected = op(1.0, arr.astype(float))
@@ -1224,9 +1224,9 @@ def test_arithmetic_with_frame_or_series(self, op):
# check that we return NotImplemented when operating with Series
# or DataFrame
index = pd.RangeIndex(5)
- other = pd.Series(np.random.randn(5))
+ other = Series(np.random.randn(5))
- expected = op(pd.Series(index), other)
+ expected = op(Series(index), other)
result = op(index, other)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index e0c03f28f7af5..ddd14af0918de 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -95,8 +95,8 @@ def test_add_extension_scalar(self, other, box_with_array, op):
# Check that scalars satisfying is_extension_array_dtype(obj)
# do not incorrectly try to dispatch to an ExtensionArray operation
- arr = pd.Series(["a", "b", "c"])
- expected = pd.Series([op(x, other) for x in arr])
+ arr = Series(["a", "b", "c"])
+ expected = Series([op(x, other) for x in arr])
arr = tm.box_expected(arr, box_with_array)
expected = tm.box_expected(expected, box_with_array)
@@ -105,8 +105,8 @@ def test_add_extension_scalar(self, other, box_with_array, op):
tm.assert_equal(result, expected)
def test_objarr_add_str(self, box_with_array):
- ser = pd.Series(["x", np.nan, "x"])
- expected = pd.Series(["xa", np.nan, "xa"])
+ ser = Series(["x", np.nan, "x"])
+ expected = Series(["xa", np.nan, "xa"])
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
@@ -115,8 +115,8 @@ def test_objarr_add_str(self, box_with_array):
tm.assert_equal(result, expected)
def test_objarr_radd_str(self, box_with_array):
- ser = pd.Series(["x", np.nan, "x"])
- expected = pd.Series(["ax", np.nan, "ax"])
+ ser = Series(["x", np.nan, "x"])
+ expected = Series(["ax", np.nan, "ax"])
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
@@ -166,22 +166,22 @@ def test_objarr_add_invalid(self, op, box_with_array):
def test_operators_na_handling(self):
ser = Series(["foo", "bar", "baz", np.nan])
result = "prefix_" + ser
- expected = pd.Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan])
+ expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan])
tm.assert_series_equal(result, expected)
result = ser + "_suffix"
- expected = pd.Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan])
+ expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan])
tm.assert_series_equal(result, expected)
# TODO: parametrize over box
@pytest.mark.parametrize("dtype", [None, object])
def test_series_with_dtype_radd_timedelta(self, dtype):
# note this test is _not_ aimed at timedelta64-dtyped Series
- ser = pd.Series(
+ ser = Series(
[pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
dtype=dtype,
)
- expected = pd.Series(
+ expected = Series(
[pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")]
)
@@ -194,7 +194,7 @@ def test_series_with_dtype_radd_timedelta(self, dtype):
# TODO: cleanup & parametrize over box
def test_mixed_timezone_series_ops_object(self):
# GH#13043
- ser = pd.Series(
+ ser = Series(
[
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-01", tz="Asia/Tokyo"),
@@ -203,7 +203,7 @@ def test_mixed_timezone_series_ops_object(self):
)
assert ser.dtype == object
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="Asia/Tokyo"),
@@ -214,7 +214,7 @@ def test_mixed_timezone_series_ops_object(self):
tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp)
# object series & object series
- ser2 = pd.Series(
+ ser2 = Series(
[
pd.Timestamp("2015-01-03", tz="US/Eastern"),
pd.Timestamp("2015-01-05", tz="Asia/Tokyo"),
@@ -222,27 +222,25 @@ def test_mixed_timezone_series_ops_object(self):
name="xxx",
)
assert ser2.dtype == object
- exp = pd.Series([pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx")
+ exp = Series([pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx")
tm.assert_series_equal(ser2 - ser, exp)
tm.assert_series_equal(ser - ser2, -exp)
- ser = pd.Series(
+ ser = Series(
[pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")],
name="xxx",
dtype=object,
)
assert ser.dtype == object
- exp = pd.Series(
- [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], name="xxx"
- )
+ exp = Series([pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], name="xxx")
tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp)
tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp)
# TODO: cleanup & parametrize over box
def test_iadd_preserves_name(self):
# GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
- ser = pd.Series([1, 2, 3])
+ ser = Series([1, 2, 3])
ser.index.name = "foo"
ser.index += 1
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index e78e696d00398..f02259a1c7e62 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -449,10 +449,10 @@ def _check(self, values, func, expected):
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
- s = pd.Series(values)
+ s = Series(values)
result = func(s)
- exp = pd.Series(expected, name=values.name)
+ exp = Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
@@ -1247,13 +1247,13 @@ def test_parr_add_sub_object_array(self):
class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
# GH#13043
- ser = pd.Series(
+ ser = Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
- expected = pd.Series(
+ expected = Series(
[pd.Period("2015-01-02", freq="D"), pd.Period("2015-01-03", freq="D")],
name="xxx",
)
@@ -1272,7 +1272,7 @@ def test_ops_series_timedelta(self):
def test_ops_series_period(self):
# GH#13043
- ser = pd.Series(
+ ser = Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
@@ -1281,17 +1281,17 @@ def test_ops_series_period(self):
per = pd.Period("2015-01-10", freq="D")
off = per.freq
# dtype will be object because of original dtype
- expected = pd.Series([9 * off, 8 * off], name="xxx", dtype=object)
+ expected = Series([9 * off, 8 * off], name="xxx", dtype=object)
tm.assert_series_equal(per - ser, expected)
tm.assert_series_equal(ser - per, -1 * expected)
- s2 = pd.Series(
+ s2 = Series(
[pd.Period("2015-01-05", freq="D"), pd.Period("2015-01-04", freq="D")],
name="xxx",
)
assert s2.dtype == "Period[D]"
- expected = pd.Series([4 * off, 2 * off], name="xxx", dtype=object)
+ expected = Series([4 * off, 2 * off], name="xxx", dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -1 * expected)
@@ -1304,10 +1304,10 @@ def _check(self, values, func, expected):
result = func(idx)
tm.assert_equal(result, expected)
- ser = pd.Series(values)
+ ser = Series(values)
result = func(ser)
- exp = pd.Series(expected, name=values.name)
+ exp = Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
@@ -1455,7 +1455,7 @@ def test_pi_offset_errors(self):
freq="D",
name="idx",
)
- ser = pd.Series(idx)
+ ser = Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 90a3ed6d75393..cd6a430829442 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -77,10 +77,10 @@ def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
box = box_with_array
xbox = box if box not in [pd.Index, pd.array] else np.ndarray
- ser = pd.Series([timedelta(days=1), timedelta(days=2)])
+ ser = Series([timedelta(days=1), timedelta(days=2)])
ser = tm.box_expected(ser, box)
actual = ser > td_scalar
- expected = pd.Series([False, True])
+ expected = Series([False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(actual, expected)
@@ -1104,7 +1104,7 @@ def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
)
def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
# vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
assert_invalid_addsub_type(tdarr, other)
@@ -1122,7 +1122,7 @@ def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
def test_td64arr_addsub_numeric_arr_invalid(
self, box_with_array, vec, any_real_dtype
):
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
vector = vec.astype(any_real_dtype)
@@ -1556,7 +1556,7 @@ def test_tdi_mul_int_series(self, box_with_array):
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
- result = idx * pd.Series(np.arange(5, dtype="int64"))
+ result = idx * Series(np.arange(5, dtype="int64"))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
@@ -1765,8 +1765,8 @@ def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
- left = pd.Series([1000, 222330, 30], dtype="timedelta64[ns]")
- right = pd.Series([1000, 222330, None], dtype="timedelta64[ns]")
+ left = Series([1000, 222330, 30], dtype="timedelta64[ns]")
+ right = Series([1000, 222330, None], dtype="timedelta64[ns]")
left = tm.box_expected(left, box)
right = tm.box_expected(right, box)
@@ -1988,7 +1988,7 @@ def test_td64arr_mul_td64arr_raises(self, box_with_array):
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
@@ -2011,7 +2011,7 @@ def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
@@ -2033,7 +2033,7 @@ def test_td64arr_rmul_numeric_array(self, box_with_array, vector, any_real_dtype
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
@@ -2057,7 +2057,7 @@ def test_td64arr_div_numeric_array(self, box_with_array, vector, any_real_dtype)
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_dtype)
expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index 2c4dd8fe64057..91992da594288 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -200,40 +200,38 @@ def test_get_indexer_non_unique(self, idx_values, key_values, key_class):
tm.assert_numpy_array_equal(exp_miss, res_miss)
def test_where_unobserved_nan(self):
- ser = pd.Series(pd.Categorical(["a", "b"]))
+ ser = Series(pd.Categorical(["a", "b"]))
result = ser.where([True, False])
- expected = pd.Series(pd.Categorical(["a", None], categories=["a", "b"]))
+ expected = Series(pd.Categorical(["a", None], categories=["a", "b"]))
tm.assert_series_equal(result, expected)
# all NA
- ser = pd.Series(pd.Categorical(["a", "b"]))
+ ser = Series(pd.Categorical(["a", "b"]))
result = ser.where([False, False])
- expected = pd.Series(pd.Categorical([None, None], categories=["a", "b"]))
+ expected = Series(pd.Categorical([None, None], categories=["a", "b"]))
tm.assert_series_equal(result, expected)
def test_where_unobserved_categories(self):
- ser = pd.Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
+ ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
result = ser.where([True, True, False], other="b")
- expected = pd.Series(
- Categorical(["a", "b", "b"], categories=ser.cat.categories)
- )
+ expected = Series(Categorical(["a", "b", "b"], categories=ser.cat.categories))
tm.assert_series_equal(result, expected)
def test_where_other_categorical(self):
- ser = pd.Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
+ ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
other = Categorical(["b", "c", "a"], categories=["a", "c", "b", "d"])
result = ser.where([True, False, True], other)
- expected = pd.Series(Categorical(["a", "c", "c"], dtype=ser.dtype))
+ expected = Series(Categorical(["a", "c", "c"], dtype=ser.dtype))
tm.assert_series_equal(result, expected)
def test_where_new_category_raises(self):
- ser = pd.Series(Categorical(["a", "b", "c"]))
+ ser = Series(Categorical(["a", "b", "c"]))
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
ser.where([True, False, True], "d")
def test_where_ordered_differs_rasies(self):
- ser = pd.Series(
+ ser = Series(
Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"], ordered=True)
)
other = Categorical(
diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py
index 26ad6fc1c6572..7867d882befa0 100644
--- a/pandas/tests/base/test_conversion.py
+++ b/pandas/tests/base/test_conversion.py
@@ -208,7 +208,7 @@ def test_iter_box(self):
],
)
def test_values_consistent(array, expected_type, dtype):
- l_values = pd.Series(array)._values
+ l_values = Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
@@ -218,14 +218,14 @@ def test_values_consistent(array, expected_type, dtype):
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
- ser = pd.Series(arr)
+ ser = Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
- ser = pd.Series(dtype=any_numpy_dtype)
+ ser = Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
@@ -336,7 +336,7 @@ def test_to_numpy(array, expected, index_or_series):
def test_to_numpy_copy(arr, as_series):
obj = pd.Index(arr, copy=False)
if as_series:
- obj = pd.Series(obj.values, copy=False)
+ obj = Series(obj.values, copy=False)
# no copy by default
result = obj.to_numpy()
@@ -355,7 +355,7 @@ def test_to_numpy_dtype(as_series):
tz = "US/Eastern"
obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
if as_series:
- obj = pd.Series(obj)
+ obj = Series(obj)
# preserve tz by default
result = obj.to_numpy()
@@ -395,13 +395,13 @@ def test_to_numpy_na_value_numpy_dtype(
def test_to_numpy_kwargs_raises():
# numpy
- s = pd.Series([1, 2, 3])
+ s = Series([1, 2, 3])
msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
# extension
- s = pd.Series([1, 2, 3], dtype="Int64")
+ s = Series([1, 2, 3], dtype="Int64")
with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py
index 2dc2fe6d2ad07..96aec2e27939a 100644
--- a/pandas/tests/base/test_misc.py
+++ b/pandas/tests/base/test_misc.py
@@ -182,7 +182,7 @@ def test_access_by_position(index):
elif isinstance(index, pd.MultiIndex):
pytest.skip("Can't instantiate Series from MultiIndex")
- series = pd.Series(index)
+ series = Series(index)
assert index[0] == series.iloc[0]
assert index[5] == series.iloc[5]
assert index[-1] == series.iloc[-1]
diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py
index 73a41e7010c5f..602133bb4122e 100644
--- a/pandas/tests/base/test_value_counts.py
+++ b/pandas/tests/base/test_value_counts.py
@@ -30,7 +30,7 @@ def test_value_counts(index_or_series_obj):
result = obj.value_counts()
counter = collections.Counter(obj)
- expected = pd.Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
+ expected = Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.astype(obj.dtype)
if isinstance(obj, pd.MultiIndex):
expected.index = pd.Index(expected.index)
@@ -67,7 +67,7 @@ def test_value_counts_null(null_obj, index_or_series_obj):
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
counter = collections.Counter(obj.dropna())
- expected = pd.Series(dict(counter.most_common()), dtype=np.int64)
+ expected = Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
result = obj.value_counts()
@@ -80,7 +80,7 @@ def test_value_counts_null(null_obj, index_or_series_obj):
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
- new_entry = pd.Series({np.nan: 3}, dtype=np.int64)
+ new_entry = Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
result = obj.value_counts(dropna=False)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index f6cd500f911b2..a419cb0dded79 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -953,9 +953,9 @@ def test_registry_find(dtype, expected):
(bool, True),
(np.bool_, True),
(np.array(["a", "b"]), False),
- (pd.Series([1, 2]), False),
+ (Series([1, 2]), False),
(np.array([True, False]), True),
- (pd.Series([True, False]), True),
+ (Series([True, False]), True),
(SparseArray([True, False]), True),
(SparseDtype(bool), True),
],
@@ -966,7 +966,7 @@ def test_is_bool_dtype(dtype, expected):
def test_is_bool_dtype_sparse():
- result = is_bool_dtype(pd.Series(SparseArray([True, False])))
+ result = is_bool_dtype(Series(SparseArray([True, False])))
assert result is True
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 7fa83eeac8400..d9b229d61248d 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -1223,7 +1223,7 @@ def test_interval(self):
inferred = lib.infer_dtype(idx._data, skipna=False)
assert inferred == "interval"
- inferred = lib.infer_dtype(pd.Series(idx), skipna=False)
+ inferred = lib.infer_dtype(Series(idx), skipna=False)
assert inferred == "interval"
@pytest.mark.parametrize("klass", [pd.array, pd.Series])
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 046b82ef3131a..e7b5d2598d8e7 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -90,8 +90,8 @@ def test_isna_isnull(self, isna_f):
assert not isna_f(-np.inf)
# type
- assert not isna_f(type(pd.Series(dtype=object)))
- assert not isna_f(type(pd.Series(dtype=np.float64)))
+ assert not isna_f(type(Series(dtype=object)))
+ assert not isna_f(type(Series(dtype=np.float64)))
assert not isna_f(type(pd.DataFrame()))
# series
@@ -247,11 +247,11 @@ def test_datetime_other_units(self):
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
- exp = pd.Series([False, True, False])
- s = pd.Series(values)
+ exp = Series([False, True, False])
+ s = Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
- s = pd.Series(values, dtype=object)
+ s = Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
@@ -278,11 +278,11 @@ def test_timedelta_other_units(self):
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
- exp = pd.Series([False, True, False])
- s = pd.Series(values)
+ exp = Series([False, True, False])
+ s = Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
- s = pd.Series(values, dtype=object)
+ s = Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
@@ -292,11 +292,11 @@ def test_period(self):
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
- exp = pd.Series([False, True, False])
- s = pd.Series(idx)
+ exp = Series([False, True, False])
+ s = Series(idx)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
- s = pd.Series(idx, dtype=object)
+ s = Series(idx, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index 598da9c52731e..58e91c38fc294 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -359,7 +359,7 @@ def test_apply_reduce_Series(self, float_frame):
def test_apply_reduce_rows_to_dict(self):
# GH 25196
data = pd.DataFrame([[1, 2], [3, 4]])
- expected = pd.Series([{0: 1, 1: 3}, {0: 2, 1: 4}])
+ expected = Series([{0: 1, 1: 3}, {0: 2, 1: 4}])
result = data.apply(dict)
tm.assert_series_equal(result, expected)
@@ -647,7 +647,7 @@ def test_applymap_na_ignore(self, float_frame):
def test_applymap_box_timestamps(self):
# GH 2689, GH 2627
- ser = pd.Series(date_range("1/1/2000", periods=10))
+ ser = Series(date_range("1/1/2000", periods=10))
def func(x):
return (x.hour, x.day, x.month)
@@ -815,7 +815,7 @@ def test_apply_category_equalness(self, val):
df = pd.DataFrame({"a": df_values}, dtype="category")
result = df.a.apply(lambda x: x == val)
- expected = pd.Series(
+ expected = Series(
[np.NaN if pd.isnull(x) else x == val for x in df_values], name="a"
)
tm.assert_series_equal(result, expected)
@@ -1153,12 +1153,12 @@ def test_agg_with_name_as_column_name(self):
# result's name should be None
result = df.agg({"name": "count"})
- expected = pd.Series({"name": 2})
+ expected = Series({"name": 2})
tm.assert_series_equal(result, expected)
# Check if name is still preserved when aggregating series instead
result = df["name"].agg({"name": "count"})
- expected = pd.Series({"name": 2}, name="name")
+ expected = Series({"name": 2}, name="name")
tm.assert_series_equal(result, expected)
def test_agg_multiple_mixed_no_warning(self):
@@ -1376,7 +1376,7 @@ def func(group_col):
return list(group_col.dropna().unique())
result = df.agg(func)
- expected = pd.Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"])
+ expected = Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"])
tm.assert_series_equal(result, expected)
result = df.agg([func])
@@ -1483,9 +1483,9 @@ def f(x, a, b, c=3):
df = pd.DataFrame([[1, 2], [3, 4]])
if axis == 0:
- expected = pd.Series([5.0, 7.0])
+ expected = Series([5.0, 7.0])
else:
- expected = pd.Series([4.0, 8.0])
+ expected = Series([4.0, 8.0])
result = df.agg(f, axis, *args, **kwargs)
@@ -1510,7 +1510,7 @@ def test_apply_datetime_tz_issue(self):
]
df = DataFrame(data=[0, 1, 2], index=timestamps)
result = df.apply(lambda x: x.name, axis=1)
- expected = pd.Series(index=timestamps, data=timestamps)
+ expected = Series(index=timestamps, data=timestamps)
tm.assert_series_equal(result, expected)
@@ -1559,7 +1559,7 @@ def test_apply_empty_list_reduce():
df = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"])
result = df.apply(lambda x: [], result_type="reduce")
- expected = pd.Series({"a": [], "b": []}, dtype=object)
+ expected = Series({"a": [], "b": []}, dtype=object)
tm.assert_series_equal(result, expected)
@@ -1578,5 +1578,5 @@ def test_apply_raw_returns_string():
# https://github.com/pandas-dev/pandas/issues/35940
df = pd.DataFrame({"A": ["aa", "bbb"]})
result = df.apply(lambda x: x[0], axis=1, raw=True)
- expected = pd.Series(["aa", "bbb"])
+ expected = Series(["aa", "bbb"])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index c097557b33f4e..4687d94b52c80 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1374,7 +1374,7 @@ def test_lookup_bool(self):
[df.loc[r, c] for r, c in zip(df.index, "mask_" + df["label"])]
)
- tm.assert_series_equal(df["mask"], pd.Series(exp_mask, name="mask"))
+ tm.assert_series_equal(df["mask"], Series(exp_mask, name="mask"))
assert df["mask"].dtype == np.bool_
def test_lookup_raises(self, float_frame):
@@ -1922,9 +1922,7 @@ def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
- column = pd.Series(
- pd.date_range("2015-01-01", periods=3, tz="utc"), name="dates"
- )
+ column = Series(pd.date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = pd.DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
@@ -2156,7 +2154,7 @@ def test_interval_index(self):
)
index_exp = pd.interval_range(start=0, periods=2, freq=1, closed="both")
- expected = pd.Series([1, 4], index=index_exp, name="A")
+ expected = Series([1, 4], index=index_exp, name="A")
result = df.loc[1, "A"]
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
index 71b40585f0c2f..0fd2471a14fc9 100644
--- a/pandas/tests/frame/indexing/test_xs.py
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -53,7 +53,7 @@ def test_xs_corner(self):
df["E"] = 3.0
xs = df.xs(0)
- exp = pd.Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
+ exp = Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
tm.assert_series_equal(xs, exp)
# no columns but Index(dtype=object)
diff --git a/pandas/tests/frame/methods/test_align.py b/pandas/tests/frame/methods/test_align.py
index d19b59debfdea..9a3435885b80a 100644
--- a/pandas/tests/frame/methods/test_align.py
+++ b/pandas/tests/frame/methods/test_align.py
@@ -199,7 +199,7 @@ def test_align_multiindex(self):
def test_align_series_combinations(self):
df = pd.DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
- s = pd.Series([1, 2, 4], index=list("ABD"), name="x")
+ s = Series([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
@@ -207,7 +207,7 @@ def test_align_series_combinations(self):
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
- exp2 = pd.Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
+ exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index 9fc3629e794e2..e4c469dd888b4 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -179,7 +179,7 @@ def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp):
tz = tz_naive_fixture
df = pd.DataFrame([pd.Timestamp(timestamp, tz=tz)])
result = df.append(df.iloc[0]).iloc[-1]
- expected = pd.Series(pd.Timestamp(timestamp, tz=tz), name=0)
+ expected = Series(pd.Timestamp(timestamp, tz=tz), name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
@@ -195,5 +195,5 @@ def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp):
def test_other_dtypes(self, data, dtype):
df = pd.DataFrame(data, dtype=dtype)
result = df.append(df.iloc[0]).iloc[-1]
- expected = pd.Series(data, name=0, dtype=dtype)
+ expected = Series(data, name=0, dtype=dtype)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index f307acd8c2178..87c9dc32650c0 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -278,10 +278,10 @@ def test_corrwith_mixed_dtypes(self):
df = pd.DataFrame(
{"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]}
)
- s = pd.Series([0, 6, 7, 3])
+ s = Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df["a"].corr(s), df["b"].corr(s)]
- expected = pd.Series(data=corrs, index=["a", "b"])
+ expected = Series(data=corrs, index=["a", "b"])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
@@ -307,7 +307,7 @@ def test_corrwith_dup_cols(self):
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
- expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
+ expected = Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index 0b70bead375da..d10d4c8ea05ab 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -300,7 +300,7 @@ def test_describe_tz_values2(self):
df = pd.DataFrame({"s1": s1, "s2": s2})
s1_ = s1.describe()
- s2_ = pd.Series(
+ s2_ = Series(
[
5,
5,
diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py
index e160d5d24d40a..9ef6ba5f410a9 100644
--- a/pandas/tests/frame/methods/test_diff.py
+++ b/pandas/tests/frame/methods/test_diff.py
@@ -33,10 +33,10 @@ def test_diff(self, datetime_frame):
tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
# GH#10907
- df = pd.DataFrame({"y": pd.Series([2]), "z": pd.Series([3])})
+ df = pd.DataFrame({"y": Series([2]), "z": Series([3])})
df.insert(0, "x", 1)
result = df.diff(axis=1)
- expected = pd.DataFrame({"x": np.nan, "y": pd.Series(1), "z": pd.Series(1)})
+ expected = pd.DataFrame({"x": np.nan, "y": Series(1), "z": Series(1)})
tm.assert_frame_equal(result, expected)
def test_diff_timedelta64_with_nat(self):
@@ -65,20 +65,20 @@ def test_diff_timedelta64_with_nat(self):
def test_diff_datetime_axis0_with_nat(self, tz):
# GH#32441
dti = pd.DatetimeIndex(["NaT", "2019-01-01", "2019-01-02"], tz=tz)
- ser = pd.Series(dti)
+ ser = Series(dti)
df = ser.to_frame()
result = df.diff()
ex_index = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta(days=1)])
- expected = pd.Series(ex_index).to_frame()
+ expected = Series(ex_index).to_frame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_diff_datetime_with_nat_zero_periods(self, tz):
# diff on NaT values should give NaT, not timedelta64(0)
dti = pd.date_range("2016-01-01", periods=4, tz=tz)
- ser = pd.Series(dti)
+ ser = Series(dti)
df = ser.to_frame()
df[1] = ser.copy()
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
index 29a3a0106c56c..fb3fbacaf2627 100644
--- a/pandas/tests/frame/methods/test_isin.py
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -127,7 +127,7 @@ def test_isin_against_series(self):
df = pd.DataFrame(
{"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
)
- s = pd.Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
+ s = Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected["A"].loc["a"] = True
expected.loc["d"] = True
@@ -194,7 +194,7 @@ def test_isin_empty_datetimelike(self):
"values",
[
pd.DataFrame({"a": [1, 2, 3]}, dtype="category"),
- pd.Series([1, 2, 3], dtype="category"),
+ Series([1, 2, 3], dtype="category"),
],
)
def test_isin_category_frame(self, values):
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py
index 0b8f1e0495155..80e57b9d71a85 100644
--- a/pandas/tests/frame/methods/test_quantile.py
+++ b/pandas/tests/frame/methods/test_quantile.py
@@ -13,15 +13,15 @@ class TestDataFrameQuantile:
[
pd.DataFrame(
{
- 0: pd.Series(pd.arrays.SparseArray([1, 2])),
- 1: pd.Series(pd.arrays.SparseArray([3, 4])),
+ 0: Series(pd.arrays.SparseArray([1, 2])),
+ 1: Series(pd.arrays.SparseArray([3, 4])),
}
),
- pd.Series([1.5, 3.5], name=0.5),
+ Series([1.5, 3.5], name=0.5),
],
[
- pd.DataFrame(pd.Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")),
- pd.Series([1.0], name=0.5),
+ pd.DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")),
+ Series([1.0], name=0.5),
],
],
)
@@ -78,11 +78,11 @@ def test_quantile_date_range(self):
# GH 2460
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
- ser = pd.Series(dti)
+ ser = Series(dti)
df = pd.DataFrame(ser)
result = df.quantile(numeric_only=False)
- expected = pd.Series(
+ expected = Series(
["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]"
)
@@ -307,7 +307,7 @@ def test_quantile_box(self):
res = df.quantile(0.5, numeric_only=False)
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
@@ -376,7 +376,7 @@ def test_quantile_box(self):
)
res = df.quantile(0.5, numeric_only=False)
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-02"),
@@ -509,7 +509,7 @@ def test_quantile_empty_no_columns(self):
df = pd.DataFrame(pd.date_range("1/1/18", periods=5))
df.columns.name = "captain tightpants"
result = df.quantile(0.5)
- expected = pd.Series([], index=[], name=0.5, dtype=np.float64)
+ expected = Series([], index=[], name=0.5, dtype=np.float64)
expected.index.name = "captain tightpants"
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index a9cf840470ae0..569677f1fec5e 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -711,7 +711,7 @@ def test_replace_list(self):
def test_replace_with_empty_list(self):
# GH 21977
- s = pd.Series([["a", "b"], [], np.nan, [1]])
+ s = Series([["a", "b"], [], np.nan, [1]])
df = pd.DataFrame({"col": s})
expected = df
result = df.replace([], np.nan)
diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py
index 3051f27882fb8..db97a3e2a0e4f 100644
--- a/pandas/tests/frame/methods/test_round.py
+++ b/pandas/tests/frame/methods/test_round.py
@@ -178,7 +178,7 @@ def test_round_with_duplicate_columns(self):
rounded = dfs.round()
tm.assert_index_equal(rounded.index, dfs.index)
- decimals = pd.Series([1, 0, 2], index=["A", "B", "A"])
+ decimals = Series([1, 0, 2], index=["A", "B", "A"])
msg = "Index of decimals must be unique"
with pytest.raises(ValueError, match=msg):
df.round(decimals)
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index 8f6902eca816f..5daecd6a475aa 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -92,8 +92,8 @@ def test_shift_bool(self):
def test_shift_categorical(self):
# GH#9416
- s1 = pd.Series(["a", "b", "c"], dtype="category")
- s2 = pd.Series(["A", "B", "C"], dtype="category")
+ s1 = Series(["a", "b", "c"], dtype="category")
+ s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
@@ -274,13 +274,13 @@ def test_datetime_frame_shift_with_freq_error(self, datetime_frame):
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
- ser = pd.Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
+ ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
df = ser.to_frame()
with tm.assert_produces_warning(FutureWarning):
result = df.shift(1, fill_value=0)
- expected = pd.Series([pd.Timestamp(0), ser[0]]).to_frame()
+ expected = Series([pd.Timestamp(0), ser[0]]).to_frame()
tm.assert_frame_equal(result, expected)
# axis = 1
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 9dab5f509bc75..9cf5afc09e800 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -131,9 +131,9 @@ def wrapper(x):
r1 = getattr(all_na, opname)(axis=1)
if opname in ["sum", "prod"]:
unit = 1 if opname == "prod" else 0 # result for empty sum/prod
- expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
+ expected = Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
- expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
+ expected = Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
@@ -466,7 +466,7 @@ def test_mean_mixed_datetime_numeric(self, tz):
df = pd.DataFrame({"A": [1, 1], "B": [pd.Timestamp("2000", tz=tz)] * 2})
with tm.assert_produces_warning(FutureWarning):
result = df.mean()
- expected = pd.Series([1.0], index=["A"])
+ expected = Series([1.0], index=["A"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
@@ -478,7 +478,7 @@ def test_mean_excludes_datetimes(self, tz):
with tm.assert_produces_warning(FutureWarning):
result = df.mean()
- expected = pd.Series(dtype=np.float64)
+ expected = Series(dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_mean_mixed_string_decimal(self):
@@ -501,7 +501,7 @@ def test_mean_mixed_string_decimal(self):
df = pd.DataFrame(d)
result = df.mean()
- expected = pd.Series([2.7, 681.6], index=["A", "C"])
+ expected = Series([2.7, 681.6], index=["A", "C"])
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
@@ -771,30 +771,30 @@ def test_sum_prod_nanops(self, method, unit):
)
# The default
result = getattr(df, method)
- expected = pd.Series([unit, unit, unit], index=idx, dtype="float64")
+ expected = Series([unit, unit, unit], index=idx, dtype="float64")
# min_count=1
result = getattr(df, method)(min_count=1)
- expected = pd.Series([unit, unit, np.nan], index=idx)
+ expected = Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
- expected = pd.Series([unit, unit, unit], index=idx, dtype="float64")
+ expected = Series([unit, unit, unit], index=idx, dtype="float64")
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
- expected = pd.Series([unit, np.nan, np.nan], index=idx)
+ expected = Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
- expected = pd.Series(result, index=["A", "B"])
+ expected = Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
- expected = pd.Series(result, index=["A", "B"])
+ expected = Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
@@ -806,7 +806,7 @@ def test_sum_nanops_timedelta(self):
# 0 by default
result = df2.sum()
- expected = pd.Series([0, 0, 0], dtype="m8[ns]", index=idx)
+ expected = Series([0, 0, 0], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
@@ -815,7 +815,7 @@ def test_sum_nanops_timedelta(self):
# min_count=1
result = df2.sum(min_count=1)
- expected = pd.Series([0, 0, np.nan], dtype="m8[ns]", index=idx)
+ expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
@@ -837,7 +837,7 @@ def test_sum_mixed_datetime(self):
).reindex([2, 3, 4])
result = df.sum()
- expected = pd.Series({"B": 7.0})
+ expected = Series({"B": 7.0})
tm.assert_series_equal(result, expected)
def test_mean_corner(self, float_frame, float_string_frame):
@@ -870,13 +870,13 @@ def test_mean_datetimelike(self):
}
)
result = df.mean(numeric_only=True)
- expected = pd.Series({"A": 1.0})
+ expected = Series({"A": 1.0})
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
# in the future datetime columns will be included
result = df.mean()
- expected = pd.Series({"A": 1.0, "C": df.loc[1, "C"]})
+ expected = Series({"A": 1.0, "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
def test_mean_datetimelike_numeric_only_false(self):
@@ -890,7 +890,7 @@ def test_mean_datetimelike_numeric_only_false(self):
# datetime(tz) and timedelta work
result = df.mean(numeric_only=False)
- expected = pd.Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})
+ expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
# mean of period is not allowed
@@ -1055,28 +1055,28 @@ def test_any_all_bool_only(self):
(np.any, {"A": [False, False], "B": [False, True]}, True),
(np.all, {"A": [False, False], "B": [False, True]}, False),
# other types
- (np.all, {"A": pd.Series([0.0, 1.0], dtype="float")}, False),
- (np.any, {"A": pd.Series([0.0, 1.0], dtype="float")}, True),
- (np.all, {"A": pd.Series([0, 1], dtype=int)}, False),
- (np.any, {"A": pd.Series([0, 1], dtype=int)}, True),
- pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False),
- pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True),
- pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
- pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
- pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False),
- pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True),
- pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
- pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
- (np.all, {"A": pd.Series([0, 1], dtype="category")}, False),
- (np.any, {"A": pd.Series([0, 1], dtype="category")}, True),
- (np.all, {"A": pd.Series([1, 2], dtype="category")}, True),
- (np.any, {"A": pd.Series([1, 2], dtype="category")}, True),
+ (np.all, {"A": Series([0.0, 1.0], dtype="float")}, False),
+ (np.any, {"A": Series([0.0, 1.0], dtype="float")}, True),
+ (np.all, {"A": Series([0, 1], dtype=int)}, False),
+ (np.any, {"A": Series([0, 1], dtype=int)}, True),
+ pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False),
+ pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True),
+ pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True),
+ pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True),
+ pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False),
+ pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True),
+ pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True),
+ pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True),
+ (np.all, {"A": Series([0, 1], dtype="category")}, False),
+ (np.any, {"A": Series([0, 1], dtype="category")}, True),
+ (np.all, {"A": Series([1, 2], dtype="category")}, True),
+ (np.any, {"A": Series([1, 2], dtype="category")}, True),
# Mix GH#21484
pytest.param(
np.all,
{
- "A": pd.Series([10, 20], dtype="M8[ns]"),
- "B": pd.Series([10, 20], dtype="m8[ns]"),
+ "A": Series([10, 20], dtype="M8[ns]"),
+ "B": Series([10, 20], dtype="m8[ns]"),
},
True,
),
@@ -1137,22 +1137,22 @@ def test_min_max_dt64_with_NaT(self):
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
res = df.min()
- exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
+ exp = Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
- exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
+ exp = Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]})
res = df.min()
- exp = pd.Series([pd.NaT], index=["foo"])
+ exp = Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
- exp = pd.Series([pd.NaT], index=["foo"])
+ exp = Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_min_max_dt64_api_consistency_with_NaT(self):
@@ -1161,7 +1161,7 @@ def test_min_max_dt64_api_consistency_with_NaT(self):
# min/max calls on empty Series/DataFrames. See GH:33704 for more
# information
df = pd.DataFrame(dict(x=pd.to_datetime([])))
- expected_dt_series = pd.Series(pd.to_datetime([]))
+ expected_dt_series = Series(pd.to_datetime([]))
# check axis 0
assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)
assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT)
@@ -1174,7 +1174,7 @@ def test_min_max_dt64_api_consistency_empty_df(self):
# check DataFrame/Series api consistency when calling min/max on an empty
# DataFrame/Series.
df = pd.DataFrame(dict(x=[]))
- expected_float_series = pd.Series([], dtype=float)
+ expected_float_series = Series([], dtype=float)
# check axis 0
assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())
assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max())
@@ -1201,7 +1201,7 @@ def test_mixed_frame_with_integer_sum():
df = pd.DataFrame([["a", 1]], columns=list("ab"))
df = df.astype({"b": "Int64"})
result = df.sum()
- expected = pd.Series(["a", 1], index=["a", "b"])
+ expected = Series(["a", 1], index=["a", "b"])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 2c04473d50851..74e6170d861b4 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -212,12 +212,12 @@ def _test_seq(df, idx_ser, col_ser):
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
- tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
+ tm.assert_frame_equal(col_eq, df == Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
- tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
+ tm.assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
@@ -225,7 +225,7 @@ def _test_seq(df, idx_ser, col_ser):
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
- tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
+ tm.assert_frame_equal(col_gt, df > Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
@@ -234,13 +234,13 @@ def _test_seq(df, idx_ser, col_ser):
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
- tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
+ tm.assert_frame_equal(col_ge, df >= Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
- idx_ser = pd.Series(np.random.randn(5))
- col_ser = pd.Series(np.random.randn(3))
+ idx_ser = Series(np.random.randn(5))
+ col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
@@ -333,7 +333,7 @@ def test_df_flex_cmp_constant_return_types(self, opname):
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
- tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
+ tm.assert_series_equal(result, Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
@@ -343,19 +343,19 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname):
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
- tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
+ tm.assert_series_equal(result, Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
- ser = pd.Series([0, 0])
+ ser = Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
- ser2 = pd.Series([1, 2], index=["A", "B"])
+ ser2 = Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
@@ -368,7 +368,7 @@ class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
- ser = pd.Series(arr)
+ ser = Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
@@ -403,7 +403,7 @@ def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
- tser = pd.Series(tdi)
+ tser = Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
@@ -413,7 +413,7 @@ def test_df_add_td64_columnwise(self):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
- ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
+ ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
@@ -421,7 +421,7 @@ def test_df_add_flex_filled_mixed_dtypes(self):
expected = pd.DataFrame(
{
- "A": pd.Series(
+ "A": Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
@@ -544,7 +544,7 @@ def test_arith_flex_series(self, simple_frame):
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
- ser_len0 = pd.Series([], dtype=object)
+ ser_len0 = Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
@@ -568,7 +568,7 @@ class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
- ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
+ ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
@@ -789,7 +789,7 @@ def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
- ser = pd.Series(dtype=np.float64)
+ ser = Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
@@ -813,7 +813,7 @@ def test_frame_with_zero_len_series_corner_cases():
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
- ser = pd.Series([1, 2], index=["A", "B"])
+ ser = Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
@@ -823,11 +823,11 @@ def test_zero_len_frame_with_series_corner_cases():
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
- "One": pd.Series(["A", 1.2, np.nan]),
+ "One": Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
- expected = pd.Series(["A", 1.2, 0])
+ expected = Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
@@ -941,7 +941,7 @@ def test_binary_ops_align(self):
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
- s = pd.Series({"a": 1, "b": 2})
+ s = Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
@@ -1521,7 +1521,7 @@ def test_pow_nan_with_zero():
def test_dataframe_series_extension_dtypes():
# https://github.com/pandas-dev/pandas/issues/34311
df = pd.DataFrame(np.random.randint(0, 100, (10, 3)), columns=["a", "b", "c"])
- ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
+ ser = Series([1, 2, 3], index=["a", "b", "c"])
expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)
expected = pd.DataFrame(expected, columns=df.columns, dtype="Int64")
@@ -1566,7 +1566,7 @@ def test_dataframe_operation_with_non_numeric_types(df, col_dtype):
# GH #22663
expected = pd.DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab"))
expected = expected.astype({"b": col_dtype})
- result = df + pd.Series([-1.0], index=list("a"))
+ result = df + Series([-1.0], index=list("a"))
tm.assert_frame_equal(result, expected)
@@ -1579,9 +1579,7 @@ def test_arith_reindex_with_duplicates():
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize(
- "to_add", [[pd.Series([1, 1])], [pd.Series([1, 1]), pd.Series([1, 1])]]
-)
+@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]])
def test_arith_list_of_arraylike_raise(to_add):
# GH 36702. Raise when trying to add list of array-like to DataFrame
df = pd.DataFrame({"x": [1, 2], "y": [1, 2]})
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index f5d2bd27762ef..2877905ddced1 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -606,7 +606,7 @@ def test_strange_column_corruption_issue(self):
def test_constructor_no_pandas_array(self):
# Ensure that PandasArray isn't allowed inside Series
# See https://github.com/pandas-dev/pandas/issues/23995 for more.
- arr = pd.Series([1, 2, 3]).array
+ arr = Series([1, 2, 3]).array
result = pd.DataFrame({"A": arr})
expected = pd.DataFrame({"A": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
@@ -648,7 +648,7 @@ def test_to_dict_of_blocks_item_cache():
def test_update_inplace_sets_valid_block_values():
# https://github.com/pandas-dev/pandas/issues/33457
- df = pd.DataFrame({"a": pd.Series([1, 2, None], dtype="category")})
+ df = pd.DataFrame({"a": Series([1, 2, None], dtype="category")})
# inplace update of a single column
df["a"].fillna(1, inplace=True)
@@ -665,8 +665,8 @@ def test_nonconsolidated_item_cache_take():
# create non-consolidated dataframe with object dtype columns
df = pd.DataFrame()
- df["col1"] = pd.Series(["a"], dtype=object)
- df["col2"] = pd.Series([0], dtype=object)
+ df["col1"] = Series(["a"], dtype=object)
+ df["col2"] = Series([0], dtype=object)
# access column (item cache)
df["col1"] == "A"
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index c6708a7b7f6c9..2bc6953217cf8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1653,7 +1653,7 @@ def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out):
pd.Index(["c", "d", "e"], name=name_in3),
]
series = {
- c: pd.Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"])
+ c: Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"])
}
result = pd.DataFrame(series)
@@ -2566,7 +2566,7 @@ def test_from_records_series_categorical_index(self):
index = CategoricalIndex(
[pd.Interval(-20, -10), pd.Interval(-10, 0), pd.Interval(0, 10)]
)
- series_of_dicts = pd.Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
+ series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
frame = pd.DataFrame.from_records(series_of_dicts, index=index)
expected = DataFrame(
{"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 5917520802519..96e56c329475c 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -36,23 +36,21 @@ def test_concat_empty_dataframe_dtypes(self):
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
- tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
+ tm.assert_series_equal(empty_df.dtypes, Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
- tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
+ tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
- tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
+ tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
- norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
+ norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc"))
)
df = pd.DataFrame(dict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
- ex_dtypes = pd.Series(
- dict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
- )
+ ex_dtypes = Series(dict([("a", np.int64), ("b", np.bool_), ("c", np.float64)]))
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
@@ -85,14 +83,12 @@ def test_dtypes_are_correct_after_column_slice(self):
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
tm.assert_series_equal(
df.dtypes,
- pd.Series(dict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
- )
- tm.assert_series_equal(
- df.iloc[:, 2:].dtypes, pd.Series(dict([("c", np.float_)]))
+ Series(dict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
+ tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series(dict([("c", np.float_)])))
tm.assert_series_equal(
df.dtypes,
- pd.Series(dict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
+ Series(dict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_dtypes_gh8722(self, float_string_frame):
@@ -114,7 +110,7 @@ def test_singlerow_slice_categoricaldtype_gives_series(self):
df = pd.DataFrame({"x": pd.Categorical("a b c d e".split())})
result = df.iloc[0]
raw_cat = pd.Categorical(["a"], categories=["a", "b", "c", "d", "e"])
- expected = pd.Series(raw_cat, index=["x"], name=0, dtype="category")
+ expected = Series(raw_cat, index=["x"], name=0, dtype="category")
tm.assert_series_equal(result, expected)
@@ -257,15 +253,15 @@ def test_convert_dtypes(self, convert_integer, expected):
# Just check that it works for DataFrame here
df = pd.DataFrame(
{
- "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
- "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
+ "a": Series([1, 2, 3], dtype=np.dtype("int32")),
+ "b": Series(["x", "y", "z"], dtype=np.dtype("O")),
}
)
result = df.convert_dtypes(True, True, convert_integer, False)
expected = pd.DataFrame(
{
- "a": pd.Series([1, 2, 3], dtype=expected),
- "b": pd.Series(["x", "y", "z"], dtype="string"),
+ "a": Series([1, 2, 3], dtype=expected),
+ "b": Series(["x", "y", "z"], dtype="string"),
}
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 5d3f8e3a2f7c1..4b33fd7832cb8 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -131,7 +131,7 @@ def test_drop_and_dropna_caching(self):
# tst that cacher updates
original = Series([1, 2, np.nan], name="A")
expected = Series([1, 2], dtype=original.dtype, name="A")
- df = pd.DataFrame({"A": original.values.copy()})
+ df = DataFrame({"A": original.values.copy()})
df2 = df.copy()
df["A"].dropna()
tm.assert_series_equal(df["A"], original)
@@ -203,7 +203,7 @@ def test_dropna_categorical_interval_index(self):
# GH 25087
ii = pd.IntervalIndex.from_breaks([0, 2.78, 3.14, 6.28])
ci = pd.CategoricalIndex(ii)
- df = pd.DataFrame({"A": list("abc")}, index=ci)
+ df = DataFrame({"A": list("abc")}, index=ci)
expected = df
result = df.dropna()
@@ -303,8 +303,8 @@ def test_fillna_datelike(self):
def test_fillna_tzaware(self):
# with timezone
# GH#15855
- df = pd.DataFrame({"A": [pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT]})
- exp = pd.DataFrame(
+ df = DataFrame({"A": [pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT]})
+ exp = DataFrame(
{
"A": [
pd.Timestamp("2012-11-11 00:00:00+01:00"),
@@ -314,8 +314,8 @@ def test_fillna_tzaware(self):
)
tm.assert_frame_equal(df.fillna(method="pad"), exp)
- df = pd.DataFrame({"A": [pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")]})
- exp = pd.DataFrame(
+ df = DataFrame({"A": [pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")]})
+ exp = DataFrame(
{
"A": [
pd.Timestamp("2012-11-11 00:00:00+01:00"),
@@ -328,14 +328,14 @@ def test_fillna_tzaware(self):
def test_fillna_tzaware_different_column(self):
# with timezone in another column
# GH#15522
- df = pd.DataFrame(
+ df = DataFrame(
{
"A": pd.date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1, 2, np.nan, np.nan],
}
)
result = df.fillna(method="pad")
- expected = pd.DataFrame(
+ expected = DataFrame(
{
"A": pd.date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1.0, 2.0, 2.0, 2.0],
@@ -378,7 +378,7 @@ def test_na_actions_categorical(self):
# make sure that fillna takes missing values into account
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
- df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
+ df = DataFrame({"cats": c, "vals": [1, 2, 3]})
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
@@ -427,15 +427,15 @@ def test_fillna_categorical_nan(self):
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
- df = pd.DataFrame({"a": [1.0, np.nan]})
+ df = DataFrame({"a": [1.0, np.nan]})
result = df.fillna(0, downcast="infer")
- expected = pd.DataFrame({"a": [1, 0]})
+ expected = DataFrame({"a": [1, 0]})
tm.assert_frame_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
- df = pd.DataFrame({"a": [1.0, np.nan]})
+ df = DataFrame({"a": [1.0, np.nan]})
result = df.fillna({"a": 0}, downcast="infer")
- expected = pd.DataFrame({"a": [1, 0]})
+ expected = DataFrame({"a": [1, 0]})
tm.assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
@@ -464,7 +464,7 @@ def test_fillna_dtype_conversion(self):
def test_fillna_datetime_columns(self):
# GH 7095
- df = pd.DataFrame(
+ df = DataFrame(
{
"A": [-1, -2, np.nan],
"B": date_range("20130101", periods=3),
@@ -474,7 +474,7 @@ def test_fillna_datetime_columns(self):
index=date_range("20130110", periods=3),
)
result = df.fillna("?")
- expected = pd.DataFrame(
+ expected = DataFrame(
{
"A": [-1, -2, "?"],
"B": date_range("20130101", periods=3),
@@ -485,7 +485,7 @@ def test_fillna_datetime_columns(self):
)
tm.assert_frame_equal(result, expected)
- df = pd.DataFrame(
+ df = DataFrame(
{
"A": [-1, -2, np.nan],
"B": [pd.Timestamp("2013-01-01"), pd.Timestamp("2013-01-02"), pd.NaT],
@@ -495,7 +495,7 @@ def test_fillna_datetime_columns(self):
index=date_range("20130110", periods=3),
)
result = df.fillna("?")
- expected = pd.DataFrame(
+ expected = DataFrame(
{
"A": [-1, -2, "?"],
"B": [pd.Timestamp("2013-01-01"), pd.Timestamp("2013-01-02"), "?"],
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index a8b76f4d85f49..172fcdc78e604 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -238,7 +238,7 @@ def check(result, expected=None):
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
- expected_ser = pd.Series(index.values, index=this_df.index)
+ expected_ser = Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"], "A": expected_ser},
columns=["A", "B", "A"],
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index b10fdbb707404..67c53a56eebe9 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -182,7 +182,7 @@ def test_unstack_fill(self):
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("w", "b", "j")
expected = unstacked[key]
- result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
+ result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
@@ -315,7 +315,7 @@ def test_unstack_fill_frame_period(self):
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
- data = pd.Series(["a", "b", "c", "a"], dtype="category")
+ data = Series(["a", "b", "c", "a"], dtype="category")
data.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
@@ -427,15 +427,15 @@ def test_unstack_preserve_dtypes(self):
dict(
state=["IL", "MI", "NC"],
index=["a", "b", "c"],
- some_categories=pd.Series(["a", "b", "c"]).astype("category"),
+ some_categories=Series(["a", "b", "c"]).astype("category"),
A=np.random.rand(3),
B=1,
C="foo",
D=pd.Timestamp("20010102"),
- E=pd.Series([1.0, 50.0, 100.0]).astype("float32"),
- F=pd.Series([3.0, 4.0, 5.0]).astype("float64"),
+ E=Series([1.0, 50.0, 100.0]).astype("float32"),
+ F=Series([3.0, 4.0, 5.0]).astype("float64"),
G=False,
- H=pd.Series([1, 200, 923442], dtype="int8"),
+ H=Series([1, 200, 923442], dtype="int8"),
)
)
@@ -586,7 +586,7 @@ def test_unstack_level_binding(self):
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
- s = pd.Series(0, index=mi)
+ s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
@@ -1144,7 +1144,7 @@ def test_stack_preserve_categorical_dtype_values(self):
df = pd.DataFrame({"A": cat, "B": cat})
result = df.stack()
index = pd.MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
- expected = pd.Series(
+ expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@@ -1186,7 +1186,7 @@ def test_unstack_mixed_extension_types(self, level):
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
- expected_dtypes = pd.Series(
+ expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
@@ -1213,7 +1213,7 @@ def test_unstack_swaplevel_sortlevel(self, level):
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
- data = pd.Series(["a", "b", "c", "a"], dtype="object")
+ data = Series(["a", "b", "c", "a"], dtype="object")
data.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
@@ -1264,7 +1264,7 @@ def test_stack_timezone_aware_values():
)
df = pd.DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
- expected = pd.Series(
+ expected = Series(
ts,
index=pd.MultiIndex(
levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 2c2584e8dee01..fe1c476ed2205 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -604,7 +604,7 @@ def test_sample(sel):
df.sample(n=1, axis="not_a_name")
with pytest.raises(ValueError):
- s = pd.Series(range(10))
+ s = Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
@@ -890,7 +890,7 @@ def test_axis_numbers_deprecated(self, box):
@pytest.mark.parametrize("as_frame", [True, False])
def test_flags_identity(self, as_frame):
- s = pd.Series([1, 2])
+ s = Series([1, 2])
if as_frame:
s = s.to_frame()
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index 07c02330d85ce..0f8df5bb78304 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -37,7 +37,7 @@ def test_set_axis_name_mi(self, func):
assert result.index.names, ["L1", "L2"]
def test_set_axis_name_raises(self):
- s = pd.Series([1])
+ s = Series([1])
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
s._set_axis_name(name="a", axis=1)
@@ -166,7 +166,7 @@ class TestSeries2:
[
Series([np.arange(5)]),
pd.date_range("1/1/2011", periods=24, freq="H"),
- pd.Series(range(5), index=pd.date_range("2017", periods=5)),
+ Series(range(5), index=pd.date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
@@ -177,5 +177,5 @@ def test_shift_always_copy(self, s, shift_size):
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1M")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH22397
- s = pd.Series(range(5), index=pd.date_range("2017", periods=5))
+ s = Series(range(5), index=pd.date_range("2017", periods=5))
assert s.shift(freq=move_by_freq) is not s
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 4a0ea5f520873..c7a52dd45fadc 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -213,14 +213,10 @@ def test_aggregate_item_by_item(df):
# GH5782
# odd comparisons can result here, so cast to make easy
- exp = pd.Series(
- np.array([foo] * K), index=list("BCD"), dtype=np.float64, name="foo"
- )
+ exp = Series(np.array([foo] * K), index=list("BCD"), dtype=np.float64, name="foo")
tm.assert_series_equal(result.xs("foo"), exp)
- exp = pd.Series(
- np.array([bar] * K), index=list("BCD"), dtype=np.float64, name="bar"
- )
+ exp = Series(np.array([bar] * K), index=list("BCD"), dtype=np.float64, name="bar")
tm.assert_almost_equal(result.xs("bar"), exp)
def aggfun(ser):
@@ -518,7 +514,7 @@ def test_agg_split_object_part_datetime():
class TestNamedAggregationSeries:
def test_series_named_agg(self):
- df = pd.Series([1, 2, 3, 4])
+ df = Series([1, 2, 3, 4])
gr = df.groupby([0, 0, 1, 1])
result = gr.agg(a="sum", b="min")
expected = pd.DataFrame(
@@ -531,7 +527,7 @@ def test_series_named_agg(self):
tm.assert_frame_equal(result, expected)
def test_no_args_raises(self):
- gr = pd.Series([1, 2]).groupby([0, 1])
+ gr = Series([1, 2]).groupby([0, 1])
with pytest.raises(TypeError, match="Must provide"):
gr.agg()
@@ -542,13 +538,13 @@ def test_no_args_raises(self):
def test_series_named_agg_duplicates_no_raises(self):
# GH28426
- gr = pd.Series([1, 2, 3]).groupby([0, 0, 1])
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
grouped = gr.agg(a="sum", b="sum")
expected = pd.DataFrame({"a": [3, 3], "b": [3, 3]})
tm.assert_frame_equal(expected, grouped)
def test_mangled(self):
- gr = pd.Series([1, 2, 3]).groupby([0, 0, 1])
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
result = gr.agg(a=lambda x: 0, b=lambda x: 1)
expected = pd.DataFrame({"a": [0, 0], "b": [1, 1]})
tm.assert_frame_equal(result, expected)
@@ -563,7 +559,7 @@ def test_mangled(self):
)
def test_named_agg_nametuple(self, inp):
# GH34422
- s = pd.Series([1, 1, 2, 2, 3, 3, 4, 5])
+ s = Series([1, 1, 2, 2, 3, 3, 4, 5])
msg = f"func is expected but received {type(inp).__name__}"
with pytest.raises(TypeError, match=msg):
s.groupby(s.values).agg(a=inp)
@@ -916,7 +912,7 @@ def test_groupby_aggregate_period_column(func):
result = getattr(df.groupby("a")["b"], func)()
idx = pd.Int64Index([1, 2], name="a")
- expected = pd.Series(periods, index=idx, name="b")
+ expected = Series(periods, index=idx, name="b")
tm.assert_series_equal(result, expected)
@@ -947,7 +943,7 @@ def test_basic(self):
tm.assert_frame_equal(result, expected)
def test_mangle_series_groupby(self):
- gr = pd.Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
+ gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
result = gr.agg([lambda x: 0, lambda x: 1])
expected = pd.DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]})
tm.assert_frame_equal(result, expected)
@@ -956,11 +952,11 @@ def test_mangle_series_groupby(self):
def test_with_kwargs(self):
f1 = lambda x, y, b=1: x.sum() + y + b
f2 = lambda x, y, b=2: x.sum() + y * b
- result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
expected = pd.DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
tm.assert_frame_equal(result, expected)
- result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
expected = pd.DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
tm.assert_frame_equal(result, expected)
@@ -1166,5 +1162,5 @@ def test_agg_no_suffix_index():
# test Series case
result = df["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
- expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A")
+ expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index e8cd6017a117c..a5f947cf656a0 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -130,11 +130,11 @@ def test_agg_dict_parameter_cast_result_dtypes():
tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
# count
- exp = pd.Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
+ exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
- exp = pd.Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
+ exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.count(), exp)
@@ -443,18 +443,18 @@ def test_agg_tzaware_non_datetime_result():
# Case that _does_ preserve the dtype
result = gb["b"].agg(lambda x: x.iloc[0])
- expected = pd.Series(dti[::2], name="b")
+ expected = Series(dti[::2], name="b")
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# Cases that do _not_ preserve the dtype
result = gb["b"].agg(lambda x: x.iloc[0].year)
- expected = pd.Series([2012, 2012], name="b")
+ expected = Series([2012, 2012], name="b")
expected.index.name = "a"
tm.assert_series_equal(result, expected)
result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])
- expected = pd.Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
+ expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
expected.index.name = "a"
tm.assert_series_equal(result, expected)
@@ -542,10 +542,10 @@ def test_agg_structs_dataframe(structure, expected):
@pytest.mark.parametrize(
"structure, expected",
[
- (tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
- (list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
- (lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
- (lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
+ (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
+ (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
+ (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
+ (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
],
)
def test_agg_structs_series(structure, expected):
@@ -565,7 +565,7 @@ def test_agg_category_nansum(observed):
{"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
)
result = df.groupby("A", observed=observed).B.agg(np.nansum)
- expected = pd.Series(
+ expected = Series(
[3, 3, 0],
index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
name="B",
@@ -633,7 +633,7 @@ def test_groupby_agg_err_catching(err_cls):
{"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
)
- expected = pd.Series(to_decimal([data[0], data[3]]))
+ expected = Series(to_decimal([data[0], data[3]]))
def weird_func(x):
# weird function that raise something other than TypeError or IndexError
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 176efdb6204da..feb758c82285d 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -568,7 +568,7 @@ def test_apply_reindex_values():
df = pd.DataFrame(
{"group": ["Group1", "Group2"] * 2, "value": values}, index=indices
)
- expected = pd.Series(values, index=indices, name="value")
+ expected = Series(values, index=indices, name="value")
def reindex_helper(x):
return x.reindex(np.arange(x.index.min(), x.index.max() + 1))
@@ -631,7 +631,7 @@ def get_B(g):
# GH 14423
def predictions(tool):
- out = pd.Series(index=["p1", "p2", "useTime"], dtype=object)
+ out = Series(index=["p1", "p2", "useTime"], dtype=object)
if "step1" in list(tool.State):
out["p1"] = str(tool[tool.State == "step1"].Machine.values[0])
if "step2" in list(tool.State):
@@ -666,7 +666,7 @@ def test_apply_aggregating_timedelta_and_datetime():
)
df["time_delta_zero"] = df.datetime - df.datetime
result = df.groupby("clientid").apply(
- lambda ddf: pd.Series(
+ lambda ddf: Series(
dict(clientid_age=ddf.time_delta_zero.min(), date=ddf.datetime.min())
)
)
@@ -707,10 +707,10 @@ def test_time_field_bug():
df = pd.DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]})
def func_with_no_date(batch):
- return pd.Series({"c": 2})
+ return Series({"c": 2})
def func_with_date(batch):
- return pd.Series({"b": datetime(2015, 1, 1), "c": 2})
+ return Series({"b": datetime(2015, 1, 1), "c": 2})
dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date)
dfg_no_conversion_expected = pd.DataFrame({"c": 2}, index=[1])
@@ -791,7 +791,7 @@ def test_groupby_apply_return_empty_chunk():
df = pd.DataFrame(dict(value=[0, 1], group=["filled", "empty"]))
groups = df.groupby("group")
result = groups.apply(lambda group: group[group.value != 1]["value"])
- expected = pd.Series(
+ expected = Series(
[0],
name="value",
index=MultiIndex.from_product(
@@ -836,7 +836,7 @@ def test_apply_datetime_issue(group_column_dtlike):
# standard int values in range(len(num_columns))
df = pd.DataFrame({"a": ["foo"], "b": [group_column_dtlike]})
- result = df.groupby("a").apply(lambda x: pd.Series(["spam"], index=[42]))
+ result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42]))
expected = pd.DataFrame(
["spam"], Index(["foo"], dtype="object", name="a"), columns=[42]
@@ -876,7 +876,7 @@ def most_common_values(df):
return Series({c: s.value_counts().index[0] for c, s in df.iteritems()})
result = tdf.groupby("day").apply(most_common_values)["userId"]
- expected = pd.Series(
+ expected = Series(
["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId"
)
tm.assert_series_equal(result, expected)
@@ -955,7 +955,7 @@ def test_apply_function_returns_non_pandas_non_scalar(function, expected_values)
# GH 31441
df = pd.DataFrame(["A", "A", "B", "B"], columns=["groups"])
result = df.groupby("groups").apply(function)
- expected = pd.Series(expected_values, index=pd.Index(["A", "B"], name="groups"))
+ expected = Series(expected_values, index=pd.Index(["A", "B"], name="groups"))
tm.assert_series_equal(result, expected)
@@ -967,7 +967,7 @@ def fct(group):
df = pd.DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]})
result = df.groupby("A").apply(fct)
- expected = pd.Series(
+ expected = Series(
[[1.0, 2.0], [3.0], [np.nan]], index=pd.Index(["a", "b", "none"], name="A")
)
tm.assert_series_equal(result, expected)
@@ -978,7 +978,7 @@ def test_apply_function_index_return(function):
# GH: 22541
df = pd.DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"])
result = df.groupby("id").apply(function)
- expected = pd.Series(
+ expected = Series(
[pd.Index([0, 4, 7, 9]), pd.Index([1, 2, 3, 5]), pd.Index([6, 8])],
index=pd.Index([1, 2, 3], name="id"),
)
@@ -996,7 +996,7 @@ def fn(x):
return x.col2
result = df.groupby(["col1"], as_index=False).apply(fn)
- expected = pd.Series(
+ expected = Series(
[1, 2, 0, 4, 5, 0],
index=pd.MultiIndex.from_tuples(
[(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)]
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 711daf7fe415d..ab211845c1957 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -470,13 +470,13 @@ def test_observed_groups_with_nan(observed):
def test_observed_nth():
# GH 26385
cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
- ser = pd.Series([1, 2, 3])
+ ser = Series([1, 2, 3])
df = pd.DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"])
- expected = pd.Series([1, np.nan, np.nan], index=index, name="ser")
+ expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
@@ -772,7 +772,7 @@ def test_preserve_on_ordered_ops(func, values):
g = df.groupby("payload")
result = getattr(g, func)()
expected = pd.DataFrame(
- {"payload": [-2, -1], "col": pd.Series(values, dtype=c.dtype)}
+ {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
@@ -822,9 +822,9 @@ def test_groupby_empty_with_category():
{"A": [None] * 3, "B": pd.Categorical(["train", "train", "test"])}
)
result = df.groupby("A").first()["B"]
- expected = pd.Series(
+ expected = Series(
pd.Categorical([], categories=["test", "train"]),
- index=pd.Series([], dtype="object", name="A"),
+ index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
@@ -1472,11 +1472,11 @@ def test_groupy_first_returned_categorical_instead_of_dataframe(func):
# GH 28641: groupby drops index, when grouping over categorical column with
# first/last. Renamed Categorical instead of DataFrame previously.
df = pd.DataFrame(
- {"A": [1997], "B": pd.Series(["b"], dtype="category").cat.as_ordered()}
+ {"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()}
)
df_grouped = df.groupby("A")["B"]
result = getattr(df_grouped, func)()
- expected = pd.Series(["b"], index=pd.Index([1997], name="A"), name="B")
+ expected = Series(["b"], index=pd.Index([1997], name="A"), name="B")
tm.assert_series_equal(result, expected)
@@ -1543,7 +1543,7 @@ def test_agg_cython_category_not_implemented_fallback():
df["col_cat"] = df["col_num"].astype("category")
result = df.groupby("col_num").col_cat.first()
- expected = pd.Series(
+ expected = Series(
[1, 2, 3], index=pd.Index([1, 2, 3], name="col_num"), name="col_cat"
)
tm.assert_series_equal(result, expected)
@@ -1556,7 +1556,7 @@ def test_agg_cython_category_not_implemented_fallback():
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH: 28641 groupby drops index, when grouping over categorical column with min/max
- ds = pd.Series(["b"], dtype="category").cat.as_ordered()
+ ds = Series(["b"], dtype="category").cat.as_ordered()
df = pd.DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = pd.DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A"))
@@ -1652,8 +1652,8 @@ def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
idx = pd.Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
- "first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"),
- "last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"),
+ "first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
+ "last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func]
@@ -1677,8 +1677,8 @@ def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
idx = pd.Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
- "first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"),
- "last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"),
+ "first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
+ "last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func].to_frame()
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index 997d9b006c802..a5842dee2c43e 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -303,12 +303,12 @@ def test_count_non_nulls():
def test_count_object():
df = pd.DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
- expected = pd.Series([3, 3], index=pd.Index([2, 3], name="c"), name="a")
+ expected = Series([3, 3], index=pd.Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
df = pd.DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
- expected = pd.Series([1, 3], index=pd.Index([2, 3], name="c"), name="a")
+ expected = Series([1, 3], index=pd.Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py
index c16ad812eb634..ad2e61ad99389 100644
--- a/pandas/tests/groupby/test_filters.py
+++ b/pandas/tests/groupby/test_filters.py
@@ -7,9 +7,9 @@
def test_filter_series():
- s = pd.Series([1, 3, 20, 5, 22, 24, 7])
- expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
- expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
+ s = Series([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
@@ -63,7 +63,7 @@ def test_filter_mixed_df():
def test_filter_out_all_groups():
- s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ s = Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
@@ -74,7 +74,7 @@ def test_filter_out_all_groups():
def test_filter_out_no_groups():
- s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ s = Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
@@ -108,7 +108,7 @@ def raise_if_sum_is_zero(x):
else:
return x.sum() > 0
- s = pd.Series([-1, 0, 1, 2])
+ s = Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
msg = "the filter must return a boolean result"
@@ -586,12 +586,12 @@ def test_filter_non_bool_raises():
def test_filter_dropna_with_empty_groups():
# GH 10780
- data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
+ data = Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
groupped = data.groupby(level=0)
result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False)
- expected_false = pd.Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))
+ expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))
tm.assert_series_equal(result_false, expected_false)
result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True)
- expected_true = pd.Series(index=pd.Index([], dtype=int), dtype=np.float64)
+ expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64)
tm.assert_series_equal(result_true, expected_true)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index ab736b55b5743..7a309db143758 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -88,13 +88,13 @@ def test_max_min_non_numeric():
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
- pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
+ Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
- pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
+ Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
@@ -157,7 +157,7 @@ def test_arg_passthru():
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
- "category_string": pd.Series(list("abc")).astype("category"),
+ "category_string": Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
@@ -695,7 +695,7 @@ def test_cummin(numpy_dtypes_for_minmax):
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
- expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
+ expected = Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummin()
tm.assert_series_equal(expected, result)
@@ -703,7 +703,7 @@ def test_cummin(numpy_dtypes_for_minmax):
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby("a").b.cummin()
- expected = pd.Series([1, 2, 1], name="b")
+ expected = Series([1, 2, 1], name="b")
tm.assert_series_equal(result, expected)
@@ -753,7 +753,7 @@ def test_cummax(numpy_dtypes_for_minmax):
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
- expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
+ expected = Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummax()
tm.assert_series_equal(expected, result)
@@ -761,7 +761,7 @@ def test_cummax(numpy_dtypes_for_minmax):
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby("a").b.cummax()
- expected = pd.Series([2, 1, 2], name="b")
+ expected = Series([2, 1, 2], name="b")
tm.assert_series_equal(result, expected)
@@ -803,7 +803,7 @@ def test_is_monotonic_increasing(in_vals, out_vals):
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_increasing
index = Index(list("abcd"), name="B")
- expected = pd.Series(index=index, data=out_vals, name="C")
+ expected = Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
@@ -840,7 +840,7 @@ def test_is_monotonic_decreasing(in_vals, out_vals):
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_decreasing
index = Index(list("abcd"), name="B")
- expected = pd.Series(index=index, data=out_vals, name="C")
+ expected = Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
@@ -1054,7 +1054,7 @@ def test_groupby_sum_below_mincount_nullable_integer():
idx = pd.Index([0, 1, 2], dtype=object, name="a")
result = grouped["b"].sum(min_count=2)
- expected = pd.Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
+ expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
tm.assert_series_equal(result, expected)
result = grouped.sum(min_count=2)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 087b4f64307e6..a1c00eb5f38f5 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -596,11 +596,11 @@ def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
- expected = pd.Series([2, 4], name="B")
+ expected = Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
- expected = pd.Series(
+ expected = Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
@@ -1188,7 +1188,7 @@ def test_groupby_unit64_float_conversion():
# GH: 30859 groupby converts unit64 to floats sometimes
df = pd.DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]})
result = df.groupby(["first", "second"])["value"].max()
- expected = pd.Series(
+ expected = Series(
[16148277970000000000],
pd.MultiIndex.from_product([[1], [1]], names=["first", "second"]),
name="value",
@@ -1775,7 +1775,7 @@ def test_tuple_as_grouping():
df[["a", "b", "c"]].groupby(("a", "b"))
result = df.groupby(("a", "b"))["c"].sum()
- expected = pd.Series([4], name="c", index=pd.Index([1], name=("a", "b")))
+ expected = Series([4], name="c", index=pd.Index([1], name=("a", "b")))
tm.assert_series_equal(result, expected)
@@ -1824,10 +1824,10 @@ def test_groupby_multiindex_nat():
(datetime(2012, 1, 3), "a"),
]
mi = pd.MultiIndex.from_tuples(values, names=["date", None])
- ser = pd.Series([3, 2, 2.5, 4], index=mi)
+ ser = Series([3, 2, 2.5, 4], index=mi)
result = ser.groupby(level=1).mean()
- expected = pd.Series([3.0, 2.5], index=["a", "b"])
+ expected = Series([3.0, 2.5], index=["a", "b"])
tm.assert_series_equal(result, expected)
@@ -1845,13 +1845,13 @@ def test_groupby_multiindex_series_keys_len_equal_group_axis():
index_array = [["x", "x"], ["a", "b"], ["k", "k"]]
index_names = ["first", "second", "third"]
ri = pd.MultiIndex.from_arrays(index_array, names=index_names)
- s = pd.Series(data=[1, 2], index=ri)
+ s = Series(data=[1, 2], index=ri)
result = s.groupby(["first", "third"]).sum()
index_array = [["x"], ["k"]]
index_names = ["first", "third"]
ei = pd.MultiIndex.from_arrays(index_array, names=index_names)
- expected = pd.Series([3], index=ei)
+ expected = Series([3], index=ei)
tm.assert_series_equal(result, expected)
@@ -1963,18 +1963,18 @@ def test_groupby_only_none_group():
# this was crashing with "ValueError: Length of passed values is 1, index implies 0"
df = pd.DataFrame({"g": [None], "x": 1})
actual = df.groupby("g")["x"].transform("sum")
- expected = pd.Series([np.nan], name="x")
+ expected = Series([np.nan], name="x")
tm.assert_series_equal(actual, expected)
def test_groupby_duplicate_index():
# GH#29189 the groupby call here used to raise
- ser = pd.Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
+ ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
gb = ser.groupby(level=0)
result = gb.mean()
- expected = pd.Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])
+ expected = Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 18ef95c05f291..3b3967b858adf 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -615,15 +615,15 @@ def test_list_grouper_with_nat(self):
[
(
"transform",
- pd.Series(name=2, dtype=np.float64, index=pd.RangeIndex(0, 0, 1)),
+ Series(name=2, dtype=np.float64, index=pd.RangeIndex(0, 0, 1)),
),
(
"agg",
- pd.Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),
+ Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),
),
(
"apply",
- pd.Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),
+ Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),
),
],
)
@@ -639,7 +639,7 @@ def test_evaluate_with_empty_groups(self, func, expected):
def test_groupby_empty(self):
# https://github.com/pandas-dev/pandas/issues/27190
- s = pd.Series([], name="name", dtype="float64")
+ s = Series([], name="name", dtype="float64")
gr = s.groupby([])
result = gr.mean()
@@ -778,7 +778,7 @@ def test_get_group_grouped_by_tuple(self):
def test_groupby_with_empty(self):
index = pd.DatetimeIndex(())
data = ()
- series = pd.Series(data, index, dtype=object)
+ series = Series(data, index, dtype=object)
grouper = pd.Grouper(freq="D")
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py
index 70d8dfc20822a..2c2147795bc07 100644
--- a/pandas/tests/groupby/test_missing.py
+++ b/pandas/tests/groupby/test_missing.py
@@ -9,7 +9,7 @@
@pytest.mark.parametrize("func", ["ffill", "bfill"])
def test_groupby_column_index_name_lost_fill_funcs(func):
# GH: 29764 groupby loses index sometimes
- df = pd.DataFrame(
+ df = DataFrame(
[[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]],
columns=pd.Index(["type", "a", "b"], name="idx"),
)
@@ -22,10 +22,10 @@ def test_groupby_column_index_name_lost_fill_funcs(func):
@pytest.mark.parametrize("func", ["ffill", "bfill"])
def test_groupby_fill_duplicate_column_names(func):
# GH: 25610 ValueError with duplicate column names
- df1 = pd.DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]})
- df2 = pd.DataFrame({"field1": [1, np.nan, 4]})
+ df1 = DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]})
+ df2 = DataFrame({"field1": [1, np.nan, 4]})
df_grouped = pd.concat([df1, df2], axis=1).groupby(by=["field2"])
- expected = pd.DataFrame(
+ expected = DataFrame(
[[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"]
)
result = getattr(df_grouped, func)()
@@ -34,7 +34,7 @@ def test_groupby_fill_duplicate_column_names(func):
def test_ffill_missing_arguments():
# GH 14955
- df = pd.DataFrame({"a": [1, 2], "b": [1, 1]})
+ df = DataFrame({"a": [1, 2], "b": [1, 1]})
with pytest.raises(ValueError, match="Must specify a fill"):
df.groupby("b").fillna()
@@ -90,7 +90,7 @@ def test_fill_consistency():
def test_ffill_handles_nan_groups(dropna, method, has_nan_group):
# GH 34725
- df_without_nan_rows = pd.DataFrame([(1, 0.1), (2, 0.2)])
+ df_without_nan_rows = DataFrame([(1, 0.1), (2, 0.2)])
ridx = [-1, 0, -1, -1, 1, -1]
df = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index 0cbfbad85a8b6..7dd37163021ed 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -142,7 +142,7 @@ def test_first_last_nth_dtypes(df_mixed_floats):
def test_first_last_nth_nan_dtype():
# GH 33591
- df = pd.DataFrame({"data": ["A"], "nans": pd.Series([np.nan], dtype=object)})
+ df = pd.DataFrame({"data": ["A"], "nans": Series([np.nan], dtype=object)})
grouped = df.groupby("data")
expected = df.set_index("data").nans
@@ -386,7 +386,7 @@ def test_first_last_tz(data, expected_first, expected_last):
)
def test_first_last_tz_multi_column(method, ts, alpha):
# GH 21603
- category_string = pd.Series(list("abc")).astype("category")
+ category_string = Series(list("abc")).astype("category")
df = pd.DataFrame(
{
"group": [1, 1, 2],
diff --git a/pandas/tests/groupby/test_nunique.py b/pandas/tests/groupby/test_nunique.py
index c3347b7ae52f3..8e37ac1a1a21d 100644
--- a/pandas/tests/groupby/test_nunique.py
+++ b/pandas/tests/groupby/test_nunique.py
@@ -96,15 +96,15 @@ def test_nunique_with_object():
result = data.groupby(["id", "amount"])["name"].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
- expected = pd.Series([1] * 5, name="name", index=index)
+ expected = Series([1] * 5, name="name", index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
- data = pd.Series(name="name", dtype=object)
+ data = Series(name="name", dtype=object)
result = data.groupby(level=0).nunique()
- expected = pd.Series(name="name", dtype="int64")
+ expected = Series(name="name", dtype="int64")
tm.assert_series_equal(result, expected)
@@ -173,5 +173,5 @@ def test_nunique_transform_with_datetime():
# GH 35109 - transform with nunique on datetimes results in integers
df = pd.DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"])
result = df.groupby([0, 0, 1])["date"].transform("nunique")
- expected = pd.Series([2, 2, 1], name="date")
+ expected = Series([2, 2, 1], name="date")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 4ccbc6a65fd88..4693fe360c819 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -408,7 +408,7 @@ def test_timegrouper_apply_return_type_series(self):
df_dt["date"] = pd.to_datetime(df_dt["date"])
def sumfunc_series(x):
- return pd.Series([x["value"].sum()], ("sum",))
+ return Series([x["value"].sum()], ("sum",))
expected = df.groupby(pd.Grouper(key="date")).apply(sumfunc_series)
result = df_dt.groupby(pd.Grouper(freq="M", key="date")).apply(sumfunc_series)
@@ -754,7 +754,7 @@ def test_scalar_call_versus_list_call(self):
# Issue: 17530
data_frame = {
"location": ["shanghai", "beijing", "shanghai"],
- "time": pd.Series(
+ "time": Series(
["2017-08-09 13:32:23", "2017-08-11 23:23:15", "2017-08-11 22:23:15"],
dtype="datetime64[ns]",
),
@@ -776,10 +776,10 @@ def test_grouper_period_index(self):
index = pd.period_range(
start="2018-01", periods=periods, freq="M", name="Month"
)
- period_series = pd.Series(range(periods), index=index)
+ period_series = Series(range(periods), index=index)
result = period_series.groupby(period_series.index.month).sum()
- expected = pd.Series(
+ expected = Series(
range(0, periods), index=Index(range(1, periods + 1), name=index.name)
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 97be039e16ebb..4b79701a57acd 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -87,7 +87,7 @@ def test_transform_fast():
grp = df.groupby("id")["val"]
values = np.repeat(grp.mean().values, ensure_platform_int(grp.count().values))
- expected = pd.Series(values, index=df.index, name="val")
+ expected = Series(values, index=df.index, name="val")
result = grp.transform(np.mean)
tm.assert_series_equal(result, expected)
@@ -221,7 +221,7 @@ def test_transform_bug():
def test_transform_numeric_to_boolean():
# GH 16875
# inconsistency in transforming boolean values
- expected = pd.Series([True, True], name="A")
+ expected = Series([True, True], name="A")
df = pd.DataFrame({"A": [1.1, 2.2], "B": [1, 2]})
result = df.groupby("B").A.transform(lambda x: True)
@@ -236,7 +236,7 @@ def test_transform_datetime_to_timedelta():
# GH 15429
# transforming a datetime to timedelta
df = DataFrame(dict(A=Timestamp("20130101"), B=np.arange(5)))
- expected = pd.Series([Timestamp("20130101") - Timestamp("20130101")] * 5, name="A")
+ expected = Series([Timestamp("20130101") - Timestamp("20130101")] * 5, name="A")
# this does date math without changing result type in transform
base_time = df["A"][0]
@@ -399,14 +399,14 @@ def test_series_fast_transform_date():
pd.Timestamp("2014-1-2"),
pd.Timestamp("2014-1-4"),
]
- expected = pd.Series(dates, name="d")
+ expected = Series(dates, name="d")
tm.assert_series_equal(result, expected)
def test_transform_length():
# GH 9697
df = pd.DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]})
- expected = pd.Series([3.0] * 4)
+ expected = Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
@@ -484,9 +484,7 @@ def test_groupby_transform_with_nan_group():
# GH 9941
df = pd.DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)["a"].transform(max)
- expected = pd.Series(
- [1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a"
- )
+ expected = Series([1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a")
tm.assert_series_equal(result, expected)
@@ -625,7 +623,7 @@ def test_cython_transform_series(op, args, targop):
"input, exp",
[
# When everything is NaN
- ({"key": ["b"] * 10, "value": np.nan}, pd.Series([np.nan] * 10, name="value")),
+ ({"key": ["b"] * 10, "value": np.nan}, Series([np.nan] * 10, name="value")),
# When there is a single NaN
(
{"key": ["b"] * 10 + ["a"] * 2, "value": [3] * 3 + [np.nan] + [3] * 8},
@@ -671,7 +669,7 @@ def test_groupby_cum_skipna(op, skipna, input, exp):
expected = exp[(op, skipna)]
else:
expected = exp
- expected = pd.Series(expected, name="value")
+ expected = Series(expected, name="value")
tm.assert_series_equal(expected, result)
@@ -792,7 +790,7 @@ def test_transform_with_non_scalar_group():
@pytest.mark.parametrize(
"cols,exp,comp_func",
[
- ("a", pd.Series([1, 1, 1], name="a"), tm.assert_series_equal),
+ ("a", Series([1, 1, 1], name="a"), tm.assert_series_equal),
(
["a", "c"],
pd.DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}),
@@ -984,7 +982,7 @@ def test_any_all_np_func(func):
[["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"]
)
- exp = pd.Series([True, np.nan, True], name="val")
+ exp = Series([True, np.nan, True], name="val")
res = df.groupby("key")["val"].transform(func)
tm.assert_series_equal(res, exp)
@@ -1037,7 +1035,7 @@ def test_groupby_transform_with_datetimes(func, values):
result = stocks.groupby(stocks["week_id"])["price"].transform(func)
- expected = pd.Series(data=pd.to_datetime(values), index=dates, name="price")
+ expected = Series(data=pd.to_datetime(values), index=dates, name="price")
tm.assert_series_equal(result, expected)
@@ -1237,5 +1235,5 @@ def test_categorical_and_not_categorical_key(observed):
)
expected = df_without_categorical.groupby(["A", "C"])["B"].transform("sum")
tm.assert_series_equal(result, expected)
- expected_explicit = pd.Series([4, 2, 4], name="B")
+ expected_explicit = Series([4, 2, 4], name="B")
tm.assert_series_equal(result, expected_explicit)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index bc178c138341f..d6d8854f4f78a 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -803,7 +803,7 @@ def test_map(self):
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
- lambda values, index: pd.Series(values, index),
+ lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, mapper):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index ea6381547009c..ada4902f6900b 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -328,7 +328,7 @@ def test_equals(self):
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
- assert not idx.equals(pd.Series(idx))
+ assert not idx.equals(Series(idx))
idx2 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
@@ -336,7 +336,7 @@ def test_equals(self):
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
- assert not idx.equals(pd.Series(idx2))
+ assert not idx.equals(Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex(idx.asi8, tz="US/Pacific")
@@ -346,7 +346,7 @@ def test_equals(self):
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
- assert not idx.equals(pd.Series(idx3))
+ assert not idx.equals(Series(idx3))
# check that we do not raise when comparing with OutOfBounds objects
oob = pd.Index([datetime(2500, 1, 1)] * 3, dtype=object)
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 16af884c89e9e..70580f0a83f83 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -196,7 +196,7 @@ def test_from_arrays_index_series_datetimetz():
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
- result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
+ result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
@@ -210,7 +210,7 @@ def test_from_arrays_index_series_timedelta():
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
- result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
+ result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
@@ -224,7 +224,7 @@ def test_from_arrays_index_series_period():
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
- result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
+ result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
@@ -244,7 +244,7 @@ def test_from_arrays_index_datetimelike_mixed():
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays(
- [pd.Series(idx1), pd.Series(idx2), pd.Series(idx3), pd.Series(idx4)]
+ [Series(idx1), Series(idx2), Series(idx3), Series(idx4)]
)
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
@@ -263,7 +263,7 @@ def test_from_arrays_index_series_categorical():
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
- result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
+ result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
@@ -340,8 +340,8 @@ def test_from_arrays_different_lengths(idx1, idx2):
def test_from_arrays_respects_none_names():
# GH27292
- a = pd.Series([1, 2, 3], name="foo")
- b = pd.Series(["a", "b", "c"], name="bar")
+ a = Series([1, 2, 3], name="foo")
+ b = Series(["a", "b", "c"], name="bar")
result = MultiIndex.from_arrays([a, b], names=None)
expected = MultiIndex(
@@ -478,7 +478,7 @@ def test_from_product_datetimeindex():
@pytest.mark.parametrize("ordered", [False, True])
-@pytest.mark.parametrize("f", [lambda x: x, lambda x: pd.Series(x), lambda x: x.values])
+@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values])
def test_from_product_index_series_categorical(ordered, f):
# GH13743
first = ["foo", "bar"]
@@ -547,11 +547,11 @@ def test_from_product_iterator():
"a, b, expected_names",
[
(
- pd.Series([1, 2, 3], name="foo"),
- pd.Series(["a", "b"], name="bar"),
+ Series([1, 2, 3], name="foo"),
+ Series(["a", "b"], name="bar"),
["foo", "bar"],
),
- (pd.Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]),
+ (Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]),
([1, 2, 3], ["a", "b"], None),
],
)
@@ -568,8 +568,8 @@ def test_from_product_infer_names(a, b, expected_names):
def test_from_product_respects_none_names():
# GH27292
- a = pd.Series([1, 2, 3], name="foo")
- b = pd.Series(["a", "b"], name="bar")
+ a = Series([1, 2, 3], name="foo")
+ b = Series(["a", "b"], name="bar")
result = MultiIndex.from_product([a, b], names=None)
expected = MultiIndex(
@@ -649,7 +649,7 @@ def test_from_frame():
@pytest.mark.parametrize(
"non_frame",
[
- pd.Series([1, 2, 3, 4]),
+ Series([1, 2, 3, 4]),
[1, 2, 3, 4],
[[1, 2], [3, 4], [5, 6]],
pd.Index([1, 2, 3, 4]),
diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py
index fec1c0e44cd9f..e1b011b762fe7 100644
--- a/pandas/tests/indexes/multi/test_equivalence.py
+++ b/pandas/tests/indexes/multi/test_equivalence.py
@@ -20,7 +20,7 @@ def test_equals(idx):
if idx.nlevels == 1:
# do not test MultiIndex
- assert not idx.equals(pd.Series(idx))
+ assert not idx.equals(Series(idx))
def test_equals_op(idx):
@@ -258,11 +258,11 @@ def test_multiindex_compare():
midx = pd.MultiIndex.from_product([[0, 1]])
# Equality self-test: MultiIndex object vs self
- expected = pd.Series([True, True])
- result = pd.Series(midx == midx)
+ expected = Series([True, True])
+ result = Series(midx == midx)
tm.assert_series_equal(result, expected)
# Greater than comparison: MultiIndex object vs self
- expected = pd.Series([False, False])
- result = pd.Series(midx > midx)
+ expected = Series([False, False])
+ result = Series(midx > midx)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index f85f37e4127c3..678967db72a0b 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -5,7 +5,6 @@
from pandas.core.dtypes.dtypes import PeriodDtype
-import pandas as pd
from pandas import (
Index,
NaT,
@@ -195,7 +194,7 @@ def test_constructor_datetime64arr_ok(self, box):
if box is None:
data = data._values
elif box == "series":
- data = pd.Series(data)
+ data = Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
@@ -362,7 +361,7 @@ def test_constructor_nat(self):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
- year = pd.Series([2001, 2002, 2003])
+ year = Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = [f"{t[0]:d}Q{t[1]:d}" for t in zip(quarter, year)]
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 85a01f1c5278c..b6d3c36f1682c 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -143,10 +143,10 @@ def test_getitem_nat(self):
assert idx[0] == Period("2011-01", freq="M")
assert idx[1] is NaT
- s = pd.Series([0, 1, 2], index=idx)
+ s = Series([0, 1, 2], index=idx)
assert s[NaT] == 1
- s = pd.Series(idx, index=idx)
+ s = Series(idx, index=idx)
assert s[Period("2011-01", freq="M")] == Period("2011-01", freq="M")
assert s[NaT] is NaT
@@ -366,7 +366,7 @@ def test_get_loc_invalid_string_raises_keyerror(self):
with pytest.raises(KeyError, match="A"):
pi.get_loc("A")
- ser = pd.Series([1, 2, 3], index=pi)
+ ser = Series([1, 2, 3], index=pi)
with pytest.raises(KeyError, match="A"):
ser.loc["A"]
@@ -687,7 +687,7 @@ def test_get_value(self):
p2 = Period("2017-09-03")
idx0 = PeriodIndex([p0, p1, p2])
- input0 = pd.Series(np.array([1, 2, 3]), index=idx0)
+ input0 = Series(np.array([1, 2, 3]), index=idx0)
expected0 = 2
with tm.assert_produces_warning(FutureWarning):
@@ -695,7 +695,7 @@ def test_get_value(self):
assert result0 == expected0
idx1 = PeriodIndex([p1, p1, p2])
- input1 = pd.Series(np.array([1, 2, 3]), index=idx1)
+ input1 = Series(np.array([1, 2, 3]), index=idx1)
expected1 = input1.iloc[[0, 1]]
with tm.assert_produces_warning(FutureWarning):
@@ -703,7 +703,7 @@ def test_get_value(self):
tm.assert_series_equal(result1, expected1)
idx2 = PeriodIndex([p1, p2, p1])
- input2 = pd.Series(np.array([1, 2, 3]), index=idx2)
+ input2 = Series(np.array([1, 2, 3]), index=idx2)
expected2 = input2.iloc[[0, 2]]
with tm.assert_produces_warning(FutureWarning):
@@ -713,7 +713,7 @@ def test_get_value(self):
def test_loc_str(self):
# https://github.com/pandas-dev/pandas/issues/33964
index = pd.period_range(start="2000", periods=20, freq="B")
- series = pd.Series(range(20), index=index)
+ series = Series(range(20), index=index)
assert series.loc["2000-01-14"] == 9
@pytest.mark.parametrize("freq", ["H", "D"])
@@ -721,7 +721,7 @@ def test_get_value_datetime_hourly(self, freq):
# get_loc and get_value should treat datetime objects symmetrically
dti = date_range("2016-01-01", periods=3, freq="MS")
pi = dti.to_period(freq)
- ser = pd.Series(range(7, 10), index=pi)
+ ser = Series(range(7, 10), index=pi)
ts = dti[0]
@@ -753,14 +753,14 @@ def test_get_value_integer(self):
msg = "index 16801 is out of bounds for axis 0 with size 3"
dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
- ser = pd.Series(range(3), index=pi)
+ ser = Series(range(3), index=pi)
with pytest.raises(IndexError, match=msg):
with tm.assert_produces_warning(FutureWarning):
pi.get_value(ser, 16801)
msg = "index 46 is out of bounds for axis 0 with size 3"
pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
- ser2 = pd.Series(range(3), index=pi2)
+ ser2 = Series(range(3), index=pi2)
with pytest.raises(IndexError, match=msg):
with tm.assert_produces_warning(FutureWarning):
pi2.get_value(ser2, 46)
@@ -776,7 +776,7 @@ def test_contains(self):
ps0 = [p0, p1, p2]
idx0 = PeriodIndex(ps0)
- ser = pd.Series(range(6, 9), index=idx0)
+ ser = Series(range(6, 9), index=idx0)
for p in ps0:
assert p in idx0
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index d1b34c315b682..74ca6ec59736b 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -297,7 +297,7 @@ def test_equals(self, freq):
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
- assert not idx.equals(pd.Series(idx))
+ assert not idx.equals(Series(idx))
idx2 = pd.PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H")
assert not idx.equals(idx2)
@@ -305,7 +305,7 @@ def test_equals(self, freq):
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
- assert not idx.equals(pd.Series(idx2))
+ assert not idx.equals(Series(idx2))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(
@@ -317,7 +317,7 @@ def test_equals(self, freq):
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
- assert not idx.equals(pd.Series(idx3))
+ assert not idx.equals(Series(idx3))
def test_freq_setter_deprecated(self):
# GH 20678
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 085d41aaa5b76..8467fde0f7021 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -256,7 +256,7 @@ def _check_all_fields(self, periodindex):
]
periods = list(periodindex)
- s = pd.Series(periodindex)
+ s = Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
@@ -307,9 +307,9 @@ def test_period_reindex_with_object(
period_index = PeriodIndex(p_values)
object_index = Index(o_values)
- s = pd.Series(values, index=period_index)
+ s = Series(values, index=period_index)
result = s.reindex(object_index)
- expected = pd.Series(expected_values, index=object_index)
+ expected = Series(expected_values, index=object_index)
tm.assert_series_equal(result, expected)
def test_is_(self):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 8db1bcc84bfa6..0e963531810df 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -137,7 +137,7 @@ def test_constructor_from_index_dtlike(self, cast_as_obj, index):
],
)
def test_constructor_from_series_dtlike(self, index, has_tz):
- result = pd.Index(pd.Series(index))
+ result = pd.Index(Series(index))
tm.assert_index_equal(result, index)
if has_tz:
@@ -168,7 +168,7 @@ def test_constructor_from_frame_series_freq(self):
expected.name = "date"
tm.assert_index_equal(result, expected)
- expected = pd.Series(dts, name="date")
+ expected = Series(dts, name="date")
tm.assert_series_equal(df["date"], expected)
# GH 6274
@@ -215,7 +215,7 @@ def test_constructor_int_dtype_nan_raises(self, dtype):
Index(data, dtype=dtype)
def test_constructor_no_pandas_array(self):
- ser = pd.Series([1, 2, 3])
+ ser = Series([1, 2, 3])
result = pd.Index(ser.array)
expected = pd.Index([1, 2, 3])
tm.assert_index_equal(result, expected)
@@ -879,7 +879,7 @@ def test_map_tseries_indices_accsr_return_index(self):
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
- lambda values, index: pd.Series(values, index),
+ lambda values, index: Series(values, index),
],
)
def test_map_dictlike_simple(self, mapper):
@@ -893,7 +893,7 @@ def test_map_dictlike_simple(self, mapper):
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
- lambda values, index: pd.Series(values, index),
+ lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, index, mapper):
@@ -2584,7 +2584,7 @@ def test_validate_1d_input():
# GH#13601 trying to assign a multi-dimensional array to an index is not
# allowed
- ser = pd.Series(0, range(4))
+ ser = Series(0, range(4))
with pytest.raises(ValueError, match=msg):
ser.index = np.array([[2, 3]] * 4)
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 3e452e7e2841d..c4429137d17f0 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -72,7 +72,7 @@ def test_nonunique_contains(self):
def test_unknown_attribute(self):
# see gh-9680
tdi = pd.timedelta_range(start=0, periods=10, freq="1s")
- ts = pd.Series(np.random.normal(size=10), index=tdi)
+ ts = Series(np.random.normal(size=10), index=tdi)
assert "foo" not in ts.__dict__.keys()
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
@@ -237,7 +237,7 @@ def test_equals(self):
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
- assert not idx.equals(pd.Series(idx))
+ assert not idx.equals(Series(idx))
idx2 = pd.TimedeltaIndex(["2 days", "1 days", "NaT"])
assert not idx.equals(idx2)
@@ -246,7 +246,7 @@ def test_equals(self):
assert not idx.astype(object).equals(idx2)
assert not idx.astype(object).equals(idx2.astype(object))
assert not idx.equals(list(idx2))
- assert not idx.equals(pd.Series(idx2))
+ assert not idx.equals(Series(idx2))
# Check that we dont raise OverflowError on comparisons outside the
# implementation range
diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py
index 8976e87a1b75a..1f3f59d038ce9 100644
--- a/pandas/tests/indexing/interval/test_interval.py
+++ b/pandas/tests/indexing/interval/test_interval.py
@@ -146,5 +146,5 @@ def test_mi_intervalindex_slicing_with_scalar(self):
idx = pd.MultiIndex.from_arrays([query_df.Item, query_df.RID, query_df.MP])
query_df.index = idx
result = df.value.loc[query_df.index]
- expected = pd.Series([1, 6, 2, 8, 7], index=idx, name="value")
+ expected = Series([1, 6, 2, 8, 7], index=idx, name="value")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 518ec9e997183..6072400d06a36 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -463,7 +463,7 @@ def test_loc_period_string_indexing():
),
)
result = df.loc[("2013Q1", 1111), "OMS"]
- expected = pd.Series(
+ expected = Series(
[np.nan],
dtype=object,
name="OMS",
@@ -482,7 +482,7 @@ def test_loc_datetime_mask_slicing():
data=[[1, 2], [3, 4], [5, 6], [7, 6]], index=m_idx, columns=["C1", "C2"]
)
result = df.loc[(dt_idx[0], (df.index.get_level_values(1) > "2017-05-04")), "C1"]
- expected = pd.Series(
+ expected = Series(
[3],
name="C1",
index=MultiIndex.from_tuples(
@@ -496,7 +496,7 @@ def test_loc_datetime_mask_slicing():
def test_loc_datetime_series_tuple_slicing():
# https://github.com/pandas-dev/pandas/issues/35858
date = pd.Timestamp("2000")
- ser = pd.Series(
+ ser = Series(
1,
index=pd.MultiIndex.from_tuples([("a", date)], names=["a", "b"]),
name="c",
@@ -546,7 +546,7 @@ def test_3levels_leading_period_index():
lev3 = ["B", "C", "Q", "F"]
mi = pd.MultiIndex.from_arrays([pi, lev2, lev3])
- ser = pd.Series(range(4), index=mi, dtype=np.float64)
+ ser = Series(range(4), index=mi, dtype=np.float64)
result = ser.loc[(pi[0], "A", "B")]
assert result == 0.0
@@ -562,7 +562,7 @@ def test_missing_keys_raises_keyerror(self):
def test_missing_key_raises_keyerror2(self):
# GH#21168 KeyError, not "IndexingError: Too many indexers"
- ser = pd.Series(-1, index=pd.MultiIndex.from_product([[0, 1]] * 2))
+ ser = Series(-1, index=pd.MultiIndex.from_product([[0, 1]] * 2))
with pytest.raises(KeyError, match=r"\(0, 3\)"):
ser.loc[0, 3]
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index ec0391a2ccc26..c1b41c6f5d8cf 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -528,7 +528,7 @@ def test_loc_ax_single_level_indexer_simple_df(self):
# test single level indexing on single index column data frame
df = pd.DataFrame(np.arange(9).reshape(3, 3), columns=["a", "b", "c"])
result = df.loc(axis=1)["a"]
- expected = pd.Series(np.array([0, 3, 6]), name="a")
+ expected = Series(np.array([0, 3, 6]), name="a")
tm.assert_series_equal(result, expected)
def test_per_axis_per_level_setitem(self):
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index fae229aecc3d4..347ce2262a261 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -733,7 +733,7 @@ def test_map_with_dict_or_series(self):
new_values, name="XXX", categories=[3.0, 2, "one"]
)
- mapper = pd.Series(new_values[:-1], index=orig_values[:-1])
+ mapper = Series(new_values[:-1], index=orig_values[:-1])
output = cur_index.map(mapper)
# Order of categories in output can be different
tm.assert_index_equal(expected, output)
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 752ecd47fe089..04790cdf6cc9d 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -124,7 +124,7 @@ def test_setitem_series_int8(self, val, exp_dtype, request):
exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, np.int8)
mark = pytest.mark.xfail(
- reason="BUG: it must be Series([1, 1, 3, 4], dtype=np.int16"
+ reason="BUG: it must be pd.Series([1, 1, 3, 4], dtype=np.int16"
)
request.node.add_marker(mark)
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index ad71b6b72df33..f5e6aea5f8db8 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -302,7 +302,7 @@ def test_loc_getitem_across_dst(self):
idx = pd.date_range(
"2017-10-29 01:30:00", tz="Europe/Berlin", periods=5, freq="30 min"
)
- series2 = pd.Series([0, 1, 2, 3, 4], index=idx)
+ series2 = Series([0, 1, 2, 3, 4], index=idx)
t_1 = pd.Timestamp(
"2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min"
@@ -311,7 +311,7 @@ def test_loc_getitem_across_dst(self):
"2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min"
)
result = series2.loc[t_1:t_2]
- expected = pd.Series([2, 3], index=idx[2:4])
+ expected = Series([2, 3], index=idx[2:4])
tm.assert_series_equal(result, expected)
result = series2[t_1]
@@ -322,10 +322,10 @@ def test_loc_incremental_setitem_with_dst(self):
# GH 20724
base = datetime(2015, 11, 1, tzinfo=tz.gettz("US/Pacific"))
idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
- result = pd.Series([0], index=[idxs[0]])
+ result = Series([0], index=[idxs[0]])
for ts in idxs:
result.loc[ts] = 1
- expected = pd.Series(1, index=idxs)
+ expected = Series(1, index=idxs)
tm.assert_series_equal(result, expected)
def test_loc_setitem_with_existing_dst(self):
@@ -372,7 +372,7 @@ def test_loc_label_slicing(self):
)
def test_getitem_slice_date(self, slice_, positions):
# https://github.com/pandas-dev/pandas/issues/31501
- s = pd.Series(
+ s = Series(
[0, 1, 2],
pd.DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index d3d455f83c41a..f94f1d6aa453f 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -51,7 +51,7 @@ class TestiLoc2:
def test_is_scalar_access(self):
# GH#32085 index with duplicates doesnt matter for _is_scalar_access
index = pd.Index([1, 2, 1])
- ser = pd.Series(range(3), index=index)
+ ser = Series(range(3), index=index)
assert ser.iloc._is_scalar_access((1,))
@@ -699,7 +699,7 @@ def test_indexing_zerodim_np_array(self):
# GH24919
df = DataFrame([[1, 2], [3, 4]])
result = df.iloc[np.array(0)]
- s = pd.Series([1, 2], name=0)
+ s = Series([1, 2], name=0)
tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index fd83f9ab29407..b4ea92fae1136 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -329,7 +329,7 @@ def test_dups_fancy_indexing2(self):
@pytest.mark.parametrize("case", [lambda s: s, lambda s: s.loc])
def test_duplicate_int_indexing(self, case):
# GH 17347
- s = pd.Series(range(3), index=[1, 1, 3])
+ s = Series(range(3), index=[1, 1, 3])
expected = s[1]
result = case(s)[[1]]
tm.assert_series_equal(result, expected)
@@ -998,9 +998,7 @@ def test_extension_array_cross_section():
},
index=["a", "b"],
)
- expected = pd.Series(
- pd.core.arrays.integer_array([1, 3]), index=["A", "B"], name="a"
- )
+ expected = Series(pd.core.arrays.integer_array([1, 3]), index=["A", "B"], name="a")
result = df.loc["a"]
tm.assert_series_equal(result, expected)
@@ -1014,7 +1012,7 @@ def test_extension_array_cross_section_converts():
{"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"]
)
result = df.loc["a"]
- expected = pd.Series([1, 1], dtype="Int64", index=["A", "B"], name="a")
+ expected = Series([1, 1], dtype="Int64", index=["A", "B"], name="a")
tm.assert_series_equal(result, expected)
result = df.iloc[0]
@@ -1026,7 +1024,7 @@ def test_extension_array_cross_section_converts():
index=["a", "b"],
)
result = df.loc["a"]
- expected = pd.Series([1, "a"], dtype=object, index=["A", "B"], name="a")
+ expected = Series([1, "a"], dtype=object, index=["A", "B"], name="a")
tm.assert_series_equal(result, expected)
result = df.iloc[0]
@@ -1049,7 +1047,7 @@ def test_readonly_indices():
def test_1tuple_without_multiindex():
- ser = pd.Series(range(5))
+ ser = Series(range(5))
key = (slice(3),)
result = ser[key]
@@ -1059,7 +1057,7 @@ def test_1tuple_without_multiindex():
def test_duplicate_index_mistyped_key_raises_keyerror():
# GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError
- ser = pd.Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
+ ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
with pytest.raises(KeyError, match="None"):
ser[None]
@@ -1072,17 +1070,17 @@ def test_duplicate_index_mistyped_key_raises_keyerror():
def test_setitem_with_bool_mask_and_values_matching_n_trues_in_length():
# GH 30567
- ser = pd.Series([None] * 10)
+ ser = Series([None] * 10)
mask = [False] * 3 + [True] * 5 + [False] * 2
ser[mask] = range(5)
result = ser
- expected = pd.Series([None] * 3 + list(range(5)) + [None] * 2).astype("object")
+ expected = Series([None] * 3 + list(range(5)) + [None] * 2).astype("object")
tm.assert_series_equal(result, expected)
def test_missing_labels_inside_loc_matched_in_error_message():
# GH34272
- s = pd.Series({"a": 1, "b": 2, "c": 3})
+ s = Series({"a": 1, "b": 2, "c": 3})
error_message_regex = "missing_0.*missing_1.*missing_2"
with pytest.raises(KeyError, match=error_message_regex):
s.loc[["a", "b", "missing_0", "c", "missing_1", "missing_2"]]
@@ -1092,7 +1090,7 @@ def test_many_missing_labels_inside_loc_error_message_limited():
# GH34272
n = 10000
missing_labels = [f"missing_{label}" for label in range(n)]
- s = pd.Series({"a": 1, "b": 2, "c": 3})
+ s = Series({"a": 1, "b": 2, "c": 3})
# regex checks labels between 4 and 9995 are replaced with ellipses
error_message_regex = "missing_4.*\\.\\.\\..*missing_9995"
with pytest.raises(KeyError, match=error_message_regex):
@@ -1101,7 +1099,7 @@ def test_many_missing_labels_inside_loc_error_message_limited():
def test_long_text_missing_labels_inside_loc_error_message_limited():
# GH34272
- s = pd.Series({"a": 1, "b": 2, "c": 3})
+ s = Series({"a": 1, "b": 2, "c": 3})
missing_labels = [f"long_missing_label_text_{i}" * 5 for i in range(3)]
# regex checks for very long labels there are new lines between each
error_message_regex = "long_missing_label_text_0.*\\\\n.*long_missing_label_text_1"
@@ -1111,7 +1109,7 @@ def test_long_text_missing_labels_inside_loc_error_message_limited():
def test_setitem_categorical():
# https://github.com/pandas-dev/pandas/issues/35369
- df = pd.DataFrame({"h": pd.Series(list("mn")).astype("category")})
+ df = pd.DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = pd.DataFrame(
{"h": pd.Categorical(["m", "n"]).reorder_categories(["n", "m"])}
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 9b9bca77e17ec..e7f2ad6e8d735 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -737,15 +737,15 @@ def test_setitem_new_key_tz(self):
pd.to_datetime(42).tz_localize("UTC"),
pd.to_datetime(666).tz_localize("UTC"),
]
- expected = pd.Series(vals, index=["foo", "bar"])
+ expected = Series(vals, index=["foo", "bar"])
- ser = pd.Series(dtype=object)
+ ser = Series(dtype=object)
ser["foo"] = vals[0]
ser["bar"] = vals[1]
tm.assert_series_equal(ser, expected)
- ser = pd.Series(dtype=object)
+ ser = Series(dtype=object)
ser.loc["foo"] = vals[0]
ser.loc["bar"] = vals[1]
@@ -907,9 +907,7 @@ def test_loc_copy_vs_view(self):
def test_loc_uint64(self):
# GH20722
# Test whether loc accept uint64 max value as index.
- s = pd.Series(
- [1, 2], index=[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]
- )
+ s = Series([1, 2], index=[np.iinfo("uint64").max - 1, np.iinfo("uint64").max])
result = s.loc[np.iinfo("uint64").max - 1]
expected = s.iloc[0]
@@ -961,7 +959,7 @@ def test_indexing_zerodim_np_array(self):
# GH24924
df = DataFrame([[1, 2], [3, 4]])
result = df.loc[np.array(0)]
- s = pd.Series([1, 2], name=0)
+ s = Series([1, 2], name=0)
tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
@@ -975,7 +973,7 @@ def test_loc_reverse_assignment(self):
data = [1, 2, 3, 4, 5, 6] + [None] * 4
expected = Series(data, index=range(2010, 2020))
- result = pd.Series(index=range(2010, 2020), dtype=np.float64)
+ result = Series(index=range(2010, 2020), dtype=np.float64)
result.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1]
tm.assert_series_equal(result, expected)
@@ -1054,7 +1052,7 @@ def test_loc_set_dataframe_multiindex():
def test_loc_mixed_int_float():
# GH#19456
- ser = pd.Series(range(2), pd.Index([1, 2.0], dtype=object))
+ ser = Series(range(2), pd.Index([1, 2.0], dtype=object))
result = ser.loc[1]
assert result == 0
@@ -1062,12 +1060,12 @@ def test_loc_mixed_int_float():
def test_loc_with_positional_slice_deprecation():
# GH#31840
- ser = pd.Series(range(4), index=["A", "B", "C", "D"])
+ ser = Series(range(4), index=["A", "B", "C", "D"])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ser.loc[:3] = 2
- expected = pd.Series([2, 2, 2, 3], index=["A", "B", "C", "D"])
+ expected = Series([2, 2, 2, 3], index=["A", "B", "C", "D"])
tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 337ec683ee745..6005f7800178c 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -664,7 +664,7 @@ def test_indexing_timeseries_regression(self):
def test_index_name_empty(self):
# GH 31368
df = pd.DataFrame({}, index=pd.RangeIndex(0, name="df_index"))
- series = pd.Series(1.23, index=pd.RangeIndex(4, name="series_index"))
+ series = Series(1.23, index=pd.RangeIndex(4, name="series_index"))
df["series"] = series
expected = pd.DataFrame(
@@ -675,7 +675,7 @@ def test_index_name_empty(self):
# GH 36527
df = pd.DataFrame()
- series = pd.Series(1.23, index=pd.RangeIndex(4, name="series_index"))
+ series = Series(1.23, index=pd.RangeIndex(4, name="series_index"))
df["series"] = series
expected = pd.DataFrame(
{"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index")
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index cd2f5a903d8cc..9a0bfa5c605d9 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1216,8 +1216,8 @@ def test_validate_ndim():
def test_block_shape():
idx = pd.Index([0, 1, 2, 3, 4])
- a = pd.Series([1, 2, 3]).reindex(idx)
- b = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)
+ a = Series([1, 2, 3]).reindex(idx)
+ b = Series(pd.Categorical([1, 2, 3])).reindex(idx)
assert a._mgr.blocks[0].mgr_locs.indexer == b._mgr.blocks[0].mgr_locs.indexer
@@ -1285,7 +1285,7 @@ def test_interleave_non_unique_cols():
def test_single_block_manager_fastpath_deprecated():
# GH#33092
- ser = pd.Series(range(3))
+ ser = Series(range(3))
blk = ser._data.blocks[0]
with tm.assert_produces_warning(FutureWarning):
SingleBlockManager(blk, ser.index, fastpath=True)
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 800b4c79b9c09..7c507b0e371e8 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -970,7 +970,7 @@ def test_read_excel_squeeze(self, read_ext):
f = "test_squeeze" + read_ext
actual = pd.read_excel(f, sheet_name="two_columns", index_col=0, squeeze=True)
- expected = pd.Series([2, 3, 4], [4, 5, 6], name="b")
+ expected = Series([2, 3, 4], [4, 5, 6], name="b")
expected.index.name = "a"
tm.assert_series_equal(actual, expected)
@@ -979,7 +979,7 @@ def test_read_excel_squeeze(self, read_ext):
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, sheet_name="one_column", squeeze=True)
- expected = pd.Series([1, 2, 3], name="a")
+ expected = Series([1, 2, 3], name="a")
tm.assert_series_equal(actual, expected)
def test_deprecated_kwargs(self, read_ext):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index dd85db19af959..239bb54f48c16 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -2031,9 +2031,9 @@ def test_period(self):
def gen_series_formatting():
- s1 = pd.Series(["a"] * 100)
- s2 = pd.Series(["ab"] * 100)
- s3 = pd.Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
+ s1 = Series(["a"] * 100)
+ s2 = Series(["ab"] * 100)
+ s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
@@ -2529,7 +2529,7 @@ def test_max_multi_index_display(self):
# Make sure #8532 is fixed
def test_consistent_format(self):
- s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
+ s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
@@ -2618,12 +2618,12 @@ def test_show_dimensions(self):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
- s = pd.Series(range(20))
+ s = Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
- s = pd.Series(range(61))
+ s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
@@ -2671,19 +2671,19 @@ def test_to_string_length(self):
assert res == exp
def test_to_string_na_rep(self):
- s = pd.Series(index=range(100), dtype=np.float64)
+ s = Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
- s = pd.Series(range(10), dtype="float64")
+ s = Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
- s = pd.Series(range(10), dtype="int64")
+ s = Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
@@ -2703,7 +2703,7 @@ def test_to_string_multindex_header(self):
def test_to_string_empty_col(self):
# GH 13653
- s = pd.Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
+ s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
res = s.to_string(index=False)
exp = " \n Hello\n World\n \n \nMooooo\n \n "
assert re.match(exp, res)
@@ -2760,7 +2760,7 @@ def __getitem__(self, ix):
def dtype(self):
return DtypeStub()
- series = pd.Series(ExtTypeStub())
+ series = Series(ExtTypeStub())
res = repr(series) # This line crashed before #33770 was fixed.
expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
assert res == expected
@@ -2787,7 +2787,7 @@ def test_output_display_precision_trailing_zeroes(self):
# Happens when display precision is set to zero
with pd.option_context("display.precision", 0):
- s = pd.Series([840.0, 4200.0])
+ s = Series([840.0, 4200.0])
expected_output = "0 840\n1 4200\ndtype: float64"
assert str(s) == expected_output
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 908fdea2f73d0..b2edb5309f299 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -979,11 +979,11 @@ def multiindex_frame(self):
"""Multiindex dataframe for testing multirow LaTeX macros."""
yield DataFrame.from_dict(
{
- ("c1", 0): pd.Series({x: x for x in range(4)}),
- ("c1", 1): pd.Series({x: x + 4 for x in range(4)}),
- ("c2", 0): pd.Series({x: x for x in range(4)}),
- ("c2", 1): pd.Series({x: x + 4 for x in range(4)}),
- ("c3", 0): pd.Series({x: x for x in range(4)}),
+ ("c1", 0): Series({x: x for x in range(4)}),
+ ("c1", 1): Series({x: x + 4 for x in range(4)}),
+ ("c2", 0): Series({x: x for x in range(4)}),
+ ("c2", 1): Series({x: x + 4 for x in range(4)}),
+ ("c3", 0): Series({x: x for x in range(4)}),
}
).T
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index e12424888f4af..4d8d4ecb50a5a 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -796,7 +796,7 @@ def test_date_index_and_values(self, date_format, as_object, date_typ):
if as_object:
data.append("a")
- ser = pd.Series(data, index=data)
+ ser = Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
@@ -1042,7 +1042,7 @@ def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
if as_object:
data.append("a")
- ser = pd.Series(data, index=data)
+ ser = Series(data, index=data)
if date_format == "iso":
expected = (
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
@@ -1150,7 +1150,7 @@ def test_sparse(self):
expected = df.to_json()
assert expected == sdf.to_json()
- s = pd.Series(np.random.randn(10))
+ s = Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
@@ -1730,5 +1730,5 @@ def test_json_pandas_nulls(self, nulls_fixture):
def test_readjson_bool_series(self):
# GH31464
result = read_json("[true, true, false]", typ="series")
- expected = pd.Series([True, True, False])
+ expected = Series([True, True, False])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 42614f1eee8af..7eeba97b799ae 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -661,7 +661,7 @@ def test_walk(self, where, expected, setup_path):
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
- "s1": pd.Series([10, 9, 8]),
+ "s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
@@ -1113,7 +1113,7 @@ def test_latin_encoding(self, setup_path, dtype, val):
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
- ser = pd.Series(val, dtype=dtype)
+ ser = Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
@@ -1509,7 +1509,7 @@ def test_to_hdf_with_min_itemsize(self, setup_path):
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
- ser = pd.Series(data, index=pd.Index(data))
+ ser = Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
@@ -4502,7 +4502,7 @@ def test_categorical_nan_only_columns(self, setup_path):
"a": ["a", "b", "c", np.nan],
"b": [np.nan, np.nan, np.nan, np.nan],
"c": [1, 2, 3, 4],
- "d": pd.Series([None] * 4, dtype=object),
+ "d": Series([None] * 4, dtype=object),
}
)
df["a"] = df.a.astype("category")
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 88f61390957a6..30926b2bd0241 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1059,7 +1059,7 @@ def test_categorical_order(self, file):
(col, pd.Categorical.from_codes(codes, labels, ordered=True))
)
else:
- cols.append((col, pd.Series(labels, dtype=np.float32)))
+ cols.append((col, Series(labels, dtype=np.float32)))
expected = DataFrame.from_dict(dict(cols))
# Read with and with out categoricals, ensure order is identical
@@ -1089,7 +1089,7 @@ def test_categorical_sorting(self, file):
cat = pd.Categorical.from_codes(
codes=codes, categories=categories, ordered=True
)
- expected = pd.Series(cat, name="srh")
+ expected = Series(cat, name="srh")
tm.assert_series_equal(expected, parsed["srh"])
@pytest.mark.parametrize("file", ["dta19_115", "dta19_117"])
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index d56c882471a9a..271cf65433afe 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -901,7 +901,7 @@ def test_xticklabels(self):
def test_xtick_barPlot(self):
# GH28172
- s = pd.Series(range(10), index=[f"P{i:02d}" for i in range(10)])
+ s = Series(range(10), index=[f"P{i:02d}" for i in range(10)])
ax = s.plot.bar(xticks=range(0, 11, 2))
exp = np.array(list(range(0, 11, 2)))
tm.assert_numpy_array_equal(exp, ax.get_xticks())
@@ -947,7 +947,7 @@ def test_plot_xlim_for_series(self, kind):
def test_plot_no_rows(self):
# GH 27758
- df = pd.Series(dtype=int)
+ df = Series(dtype=int)
assert df.empty
ax = df.plot()
assert len(ax.get_lines()) == 1
@@ -956,12 +956,12 @@ def test_plot_no_rows(self):
assert len(line.get_ydata()) == 0
def test_plot_no_numeric_data(self):
- df = pd.Series(["a", "b", "c"])
+ df = Series(["a", "b", "c"])
with pytest.raises(TypeError):
df.plot()
def test_style_single_ok(self):
- s = pd.Series([1, 2])
+ s = Series([1, 2])
ax = s.plot(style="s", color="C3")
assert ax.lines[0].get_color() == "C3"
@@ -972,7 +972,7 @@ def test_style_single_ok(self):
@pytest.mark.parametrize("kind", ["line", "area", "bar"])
def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label):
# GH 9093
- ser = pd.Series([1, 2, 3, 4])
+ ser = Series([1, 2, 3, 4])
ser.index.name = index_name
# default is the ylabel is not shown and xlabel is index name
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index fe97925c2bb74..02231f0431d9f 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -643,40 +643,40 @@ def test_empty(self, method, unit, use_bottleneck, dtype):
df = DataFrame(np.empty((10, 0)), dtype=dtype)
assert (getattr(df, method)(1) == unit).all()
- s = pd.Series([1], dtype=dtype)
+ s = Series([1], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
result = getattr(s, method)(skipna=False, min_count=2)
assert pd.isna(result)
- s = pd.Series([np.nan], dtype=dtype)
+ s = Series([np.nan], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
- s = pd.Series([np.nan, 1], dtype=dtype)
+ s = Series([np.nan, 1], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
@pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
def test_empty_multi(self, method, unit):
- s = pd.Series(
+ s = Series(
[1, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product([("a", "b"), (0, 1)]),
)
# 1 / 0 by default
result = getattr(s, method)(level=0)
- expected = pd.Series([1, unit], index=["a", "b"])
+ expected = Series([1, unit], index=["a", "b"])
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(s, method)(level=0, min_count=0)
- expected = pd.Series([1, unit], index=["a", "b"])
+ expected = Series([1, unit], index=["a", "b"])
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(s, method)(level=0, min_count=1)
- expected = pd.Series([1, np.nan], index=["a", "b"])
+ expected = Series([1, np.nan], index=["a", "b"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["mean", "median", "std", "var"])
@@ -844,13 +844,13 @@ def test_idxmax(self):
# Float64Index
# GH#5914
- s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
+ s = Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
- s = pd.Series(s.index, s.index)
+ s = Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
@@ -876,7 +876,7 @@ def test_all_any_params(self):
assert not s2.any(skipna=True)
# Check level.
- s = pd.Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2])
+ s = Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2])
tm.assert_series_equal(s.all(level=0), Series([False, True, False]))
tm.assert_series_equal(s.any(level=0), Series([False, True, True]))
@@ -908,7 +908,7 @@ def test_all_any_boolean(self):
assert not s4.any(skipna=False)
# Check level TODO(GH-33449) result should also be boolean
- s = pd.Series(
+ s = Series(
[False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2],
dtype="boolean",
@@ -920,7 +920,7 @@ def test_any_axis1_bool_only(self):
# GH#32432
df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
result = df.any(axis=1, bool_only=True)
- expected = pd.Series([True, False])
+ expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_timedelta64_analytics(self):
@@ -968,12 +968,12 @@ def test_timedelta64_analytics(self):
@pytest.mark.parametrize(
"test_input,error_type",
[
- (pd.Series([], dtype="float64"), ValueError),
+ (Series([], dtype="float64"), ValueError),
# For strings, or any Series with dtype 'O'
- (pd.Series(["foo", "bar", "baz"]), TypeError),
- (pd.Series([(1,), (2,)]), TypeError),
+ (Series(["foo", "bar", "baz"]), TypeError),
+ (Series([(1,), (2,)]), TypeError),
# For mixed data types
- (pd.Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"]), TypeError),
+ (Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"]), TypeError),
],
)
def test_assert_idxminmax_raises(self, test_input, error_type):
@@ -991,7 +991,7 @@ def test_assert_idxminmax_raises(self, test_input, error_type):
def test_idxminmax_with_inf(self):
# For numeric data with NA and Inf (GH #13595)
- s = pd.Series([0, -np.inf, np.inf, np.nan])
+ s = Series([0, -np.inf, np.inf, np.nan])
assert s.idxmin() == 1
assert np.isnan(s.idxmin(skipna=False))
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 28d33ebb23c20..1b9145679fb12 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -120,7 +120,7 @@ def test_resample_count_empty_series(freq, empty_series_dti, resample_method):
index = _asfreq_compat(empty_series_dti.index, freq)
- expected = pd.Series([], dtype="int64", index=index, name=empty_series_dti.name)
+ expected = Series([], dtype="int64", index=index, name=empty_series_dti.name)
tm.assert_series_equal(result, expected)
@@ -174,7 +174,7 @@ def test_resample_size_empty_dataframe(freq, empty_frame_dti):
index = _asfreq_compat(empty_frame_dti.index, freq)
- expected = pd.Series([], dtype="int64", index=index)
+ expected = Series([], dtype="int64", index=index)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 9475dcc6981ff..07e47650d0c24 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -111,7 +111,7 @@ def test_resample_basic(series, closed, expected):
def test_resample_integerarray():
# GH 25580, resample on IntegerArray
- ts = pd.Series(
+ ts = Series(
range(9), index=pd.date_range("1/1/2000", periods=9, freq="T"), dtype="Int64"
)
result = ts.resample("3T").sum()
@@ -849,7 +849,7 @@ def test_resample_origin_epoch_with_tz_day_vs_24h():
start, end = "2000-10-01 23:30:00+0500", "2000-12-02 00:30:00+0500"
rng = pd.date_range(start, end, freq="7min")
random_values = np.random.randn(len(rng))
- ts_1 = pd.Series(random_values, index=rng)
+ ts_1 = Series(random_values, index=rng)
result_1 = ts_1.resample("D", origin="epoch").mean()
result_2 = ts_1.resample("24H", origin="epoch").mean()
@@ -865,7 +865,7 @@ def test_resample_origin_epoch_with_tz_day_vs_24h():
# check that we have the similar results with two different timezones (+2H and +5H)
start, end = "2000-10-01 23:30:00+0200", "2000-12-02 00:30:00+0200"
rng = pd.date_range(start, end, freq="7min")
- ts_2 = pd.Series(random_values, index=rng)
+ ts_2 = Series(random_values, index=rng)
result_5 = ts_2.resample("D", origin="epoch").mean()
result_6 = ts_2.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1.tz_localize(None), result_5.tz_localize(None))
@@ -877,7 +877,7 @@ def test_resample_origin_with_day_freq_on_dst():
tz = "America/Chicago"
def _create_series(values, timestamps, freq="D"):
- return pd.Series(
+ return Series(
values,
index=pd.DatetimeIndex(
[Timestamp(t, tz=tz) for t in timestamps], freq=freq, ambiguous=True
@@ -888,7 +888,7 @@ def _create_series(values, timestamps, freq="D"):
start = pd.Timestamp("2013-11-02", tz=tz)
end = pd.Timestamp("2013-11-03 23:59", tz=tz)
rng = pd.date_range(start, end, freq="1h")
- ts = pd.Series(np.ones(len(rng)), index=rng)
+ ts = Series(np.ones(len(rng)), index=rng)
expected = _create_series([24.0, 25.0], ["2013-11-02", "2013-11-03"])
for origin in ["epoch", "start", "start_day", start, None]:
@@ -899,7 +899,7 @@ def _create_series(values, timestamps, freq="D"):
start = pd.Timestamp("2013-11-03", tz=tz)
end = pd.Timestamp("2013-11-03 23:59", tz=tz)
rng = pd.date_range(start, end, freq="1h")
- ts = pd.Series(np.ones(len(rng)), index=rng)
+ ts = Series(np.ones(len(rng)), index=rng)
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]
expected = _create_series([23.0, 2.0], expected_ts)
@@ -1689,9 +1689,7 @@ def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
# GH 24127
n1_ = n1 * k
n2_ = n2 * k
- s = pd.Series(
- 0, index=pd.date_range("19910905 13:00", "19911005 07:00", freq=freq1)
- )
+ s = Series(0, index=pd.date_range("19910905 13:00", "19911005 07:00", freq=freq1))
s = s + range(len(s))
result1 = s.resample(str(n1_) + freq1).mean()
@@ -1781,9 +1779,9 @@ def test_resample_calendar_day_with_dst(
first: str, last: str, freq_in: str, freq_out: str, exp_last: str
):
# GH 35219
- ts = pd.Series(1.0, pd.date_range(first, last, freq=freq_in, tz="Europe/Amsterdam"))
+ ts = Series(1.0, pd.date_range(first, last, freq=freq_in, tz="Europe/Amsterdam"))
result = ts.resample(freq_out).pad()
- expected = pd.Series(
+ expected = Series(
1.0, pd.date_range(first, exp_last, freq=freq_out, tz="Europe/Amsterdam")
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py
index 8b3adbf08d157..24695a38a85ac 100644
--- a/pandas/tests/resample/test_deprecated.py
+++ b/pandas/tests/resample/test_deprecated.py
@@ -226,7 +226,7 @@ def test_loffset_returns_datetimeindex(frame, kind, agg_arg):
)
def test_resample_with_non_zero_base(start, end, start_freq, end_freq, base, offset):
# GH 23882
- s = pd.Series(0, index=pd.period_range(start, end, freq=start_freq))
+ s = Series(0, index=pd.period_range(start, end, freq=start_freq))
s = s + np.arange(len(s))
with tm.assert_produces_warning(FutureWarning):
result = s.resample(end_freq, base=base).mean()
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index fe02eaef8ba82..f5b655ebd416b 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -816,7 +816,7 @@ def test_resample_with_only_nat(self):
)
def test_resample_with_offset(self, start, end, start_freq, end_freq, offset):
# GH 23882 & 31809
- s = pd.Series(0, index=pd.period_range(start, end, freq=start_freq))
+ s = Series(0, index=pd.period_range(start, end, freq=start_freq))
s = s + np.arange(len(s))
result = s.resample(end_freq, offset=offset).mean()
result = result.to_timestamp(end_freq)
@@ -861,9 +861,9 @@ def test_sum_min_count(self):
index = pd.date_range(start="2018", freq="M", periods=6)
data = np.ones(6)
data[3:6] = np.nan
- s = pd.Series(data, index).to_period()
+ s = Series(data, index).to_period()
result = s.resample("Q").sum(min_count=1)
- expected = pd.Series(
+ expected = Series(
[3.0, np.nan], index=PeriodIndex(["2018Q1", "2018Q2"], freq="Q-DEC")
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index e4af5d93ff771..dbb85c2f890bf 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -595,10 +595,10 @@ def test_resample_agg_readonly():
arr = np.zeros_like(index)
arr.setflags(write=False)
- ser = pd.Series(arr, index=index)
+ ser = Series(arr, index=index)
rs = ser.resample("1D")
- expected = pd.Series([pd.Timestamp(0), pd.Timestamp(0)], index=index[::24])
+ expected = Series([pd.Timestamp(0), pd.Timestamp(0)], index=index[::24])
result = rs.agg("last")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 2f22be7c8cce9..53966392d3aff 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -142,7 +142,7 @@ def test_groupby_with_origin():
middle = "1/15/2000 00:00:00"
rng = pd.date_range(start, end, freq="1231min") # prime number
- ts = pd.Series(np.random.randn(len(rng)), index=rng)
+ ts = Series(np.random.randn(len(rng)), index=rng)
ts2 = ts[middle:end]
# proves that grouper without a fixed origin does not work
@@ -356,7 +356,7 @@ def test_apply_to_one_column_of_df():
index=pd.date_range("2012-01-01", periods=10, freq="20min"),
)
result = df.resample("H").apply(lambda group: group.col.sum())
- expected = pd.Series(
+ expected = Series(
[3, 12, 21, 9], index=pd.date_range("2012-01-01", periods=4, freq="H")
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index f638706207679..c8c5fa47706fc 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -167,9 +167,9 @@ def test_aggregate_normal(resample_method):
],
)
def test_resample_entirely_nat_window(method, method_args, unit):
- s = pd.Series([0] * 2 + [np.nan] * 2, index=pd.date_range("2017", periods=4))
+ s = Series([0] * 2 + [np.nan] * 2, index=pd.date_range("2017", periods=4))
result = methodcaller(method, **method_args)(s.resample("2d"))
- expected = pd.Series(
+ expected = Series(
[0.0, unit], index=pd.DatetimeIndex(["2017-01-01", "2017-01-03"], freq="2D")
)
tm.assert_series_equal(result, expected)
@@ -278,14 +278,14 @@ def test_repr():
],
)
def test_upsample_sum(method, method_args, expected_values):
- s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H"))
+ s = Series(1, index=pd.date_range("2017", periods=2, freq="H"))
resampled = s.resample("30T")
index = pd.DatetimeIndex(
["2017-01-01T00:00:00", "2017-01-01T00:30:00", "2017-01-01T01:00:00"],
freq="30T",
)
result = methodcaller(method, **method_args)(resampled)
- expected = pd.Series(expected_values, index=index)
+ expected = Series(expected_values, index=index)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py
index 3fa85e62d028c..d0a0cf3cacd16 100644
--- a/pandas/tests/resample/test_timedelta.py
+++ b/pandas/tests/resample/test_timedelta.py
@@ -144,7 +144,7 @@ def test_resample_timedelta_edge_case(start, end, freq, resample_freq):
# GH 33498
# check that the timedelta bins does not contains an extra bin
idx = pd.timedelta_range(start=start, end=end, freq=freq)
- s = pd.Series(np.arange(len(idx)), index=idx)
+ s = Series(np.arange(len(idx)), index=idx)
result = s.resample(resample_freq).min()
expected_index = pd.timedelta_range(freq=resample_freq, start=start, end=end)
tm.assert_index_equal(result.index, expected_index)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index d4d4c4190417e..4cc72e66353b3 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -762,7 +762,7 @@ def test_join_on_tz_aware_datetimeindex(self):
)
result = df1.join(df2.set_index("date"), on="date")
expected = df1.copy()
- expected["vals_2"] = pd.Series([np.nan] * 2 + list("tuv"), dtype=object)
+ expected["vals_2"] = Series([np.nan] * 2 + list("tuv"), dtype=object)
tm.assert_frame_equal(result, expected)
def test_join_datetime_string(self):
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index aee503235d36c..6968dc781b6e3 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -46,22 +46,22 @@ def get_test_data(ngroups=NGROUPS, n=N):
def get_series():
return [
- pd.Series([1], dtype="int64"),
- pd.Series([1], dtype="Int64"),
- pd.Series([1.23]),
- pd.Series(["foo"]),
- pd.Series([True]),
- pd.Series([pd.Timestamp("2018-01-01")]),
- pd.Series([pd.Timestamp("2018-01-01", tz="US/Eastern")]),
+ Series([1], dtype="int64"),
+ Series([1], dtype="Int64"),
+ Series([1.23]),
+ Series(["foo"]),
+ Series([True]),
+ Series([pd.Timestamp("2018-01-01")]),
+ Series([pd.Timestamp("2018-01-01", tz="US/Eastern")]),
]
def get_series_na():
return [
- pd.Series([np.nan], dtype="Int64"),
- pd.Series([np.nan], dtype="float"),
- pd.Series([np.nan], dtype="object"),
- pd.Series([pd.NaT]),
+ Series([np.nan], dtype="Int64"),
+ Series([np.nan], dtype="float"),
+ Series([np.nan], dtype="object"),
+ Series([pd.NaT]),
]
@@ -253,16 +253,16 @@ def test_merge_different_column_key_names(self):
right, left_on="lkey", right_on="rkey", how="outer", sort=True
)
- exp = pd.Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey")
+ exp = Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey")
tm.assert_series_equal(merged["lkey"], exp)
- exp = pd.Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey")
+ exp = Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey")
tm.assert_series_equal(merged["rkey"], exp)
- exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x")
+ exp = Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x")
tm.assert_series_equal(merged["value_x"], exp)
- exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y")
+ exp = Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y")
tm.assert_series_equal(merged["value_y"], exp)
def test_merge_copy(self):
@@ -541,9 +541,9 @@ def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
df_empty = df[:0]
expected = pd.DataFrame(
{
- "value_x": pd.Series(dtype=df.dtypes["value"]),
- "key": pd.Series(dtype=df.dtypes["key"]),
- "value_y": pd.Series(dtype=df.dtypes["value"]),
+ "value_x": Series(dtype=df.dtypes["value"]),
+ "key": Series(dtype=df.dtypes["key"]),
+ "value_y": Series(dtype=df.dtypes["value"]),
},
columns=["value_x", "key", "value_y"],
)
@@ -676,7 +676,7 @@ def test_join_append_timedeltas(self):
def test_other_datetime_unit(self):
# GH 13389
df1 = pd.DataFrame({"entity_id": [101, 102]})
- s = pd.Series([None, None], index=[101, 102], name="days")
+ s = Series([None, None], index=[101, 102], name="days")
for dtype in [
"datetime64[D]",
@@ -707,7 +707,7 @@ def test_other_datetime_unit(self):
def test_other_timedelta_unit(self, unit):
# GH 13389
df1 = pd.DataFrame({"entity_id": [101, 102]})
- s = pd.Series([None, None], index=[101, 102], name="days")
+ s = Series([None, None], index=[101, 102], name="days")
dtype = f"m8[{unit}]"
df2 = s.astype(dtype).to_frame("days")
@@ -812,11 +812,11 @@ def test_merge_on_datetime64tz_empty(self):
result = left.merge(right, on="date")
expected = pd.DataFrame(
{
- "value_x": pd.Series(dtype=float),
- "date2_x": pd.Series(dtype=dtz),
- "date": pd.Series(dtype=dtz),
- "value_y": pd.Series(dtype=float),
- "date2_y": pd.Series(dtype=dtz),
+ "value_x": Series(dtype=float),
+ "date2_x": Series(dtype=dtz),
+ "date": Series(dtype=dtz),
+ "value_y": Series(dtype=float),
+ "date2_y": Series(dtype=dtz),
},
columns=["value_x", "date2_x", "date", "value_y", "date2_y"],
)
@@ -1521,8 +1521,8 @@ def test_merge_incompat_infer_boolean_object(self):
([0, 1, 2], Series(["a", "b", "a"]).astype("category")),
([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")),
# no not infer
- ([0, 1], pd.Series([False, True], dtype=object)),
- ([0, 1], pd.Series([False, True], dtype=bool)),
+ ([0, 1], Series([False, True], dtype=object)),
+ ([0, 1], Series([False, True], dtype=bool)),
],
)
def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals):
@@ -1836,10 +1836,10 @@ def test_merging_with_bool_or_int_cateorical_column(
def test_merge_on_int_array(self):
# GH 23020
- df = pd.DataFrame({"A": pd.Series([1, 2, np.nan], dtype="Int64"), "B": 1})
+ df = pd.DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1})
result = pd.merge(df, df, on="A")
expected = pd.DataFrame(
- {"A": pd.Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1}
+ {"A": Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1}
)
tm.assert_frame_equal(result, expected)
@@ -1956,7 +1956,7 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
[["a", "b"], [0, 1]], names=["outer", "inner"]
),
)
- b = pd.Series(
+ b = Series(
[1, 2, 3, 4],
index=pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["outer", "inner"]
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 48500531aa351..340b50ed60ceb 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -91,7 +91,7 @@ def _check_expected_dtype(self, obj, label):
assert obj.dtype == "object"
else:
assert obj.dtype == label
- elif isinstance(obj, pd.Series):
+ elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
@@ -103,7 +103,7 @@ def test_dtypes(self):
# to confirm test case covers intended dtypes
for typ, vals in self.data.items():
self._check_expected_dtype(pd.Index(vals), typ)
- self._check_expected_dtype(pd.Series(vals), typ)
+ self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self):
# GH 13660
@@ -155,42 +155,42 @@ def test_concatlike_same_dtypes(self):
# ----- Series ----- #
# series.append
- res = pd.Series(vals1).append(pd.Series(vals2), ignore_index=True)
- exp = pd.Series(exp_data)
+ res = Series(vals1).append(Series(vals2), ignore_index=True)
+ exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
- res = pd.concat([pd.Series(vals1), pd.Series(vals2)], ignore_index=True)
+ res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
- res = pd.Series(vals1).append(
- [pd.Series(vals2), pd.Series(vals3)], ignore_index=True
+ res = Series(vals1).append(
+ [Series(vals2), Series(vals3)], ignore_index=True
)
- exp = pd.Series(exp_data3)
+ exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
- [pd.Series(vals1), pd.Series(vals2), pd.Series(vals3)],
+ [Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
- s1 = pd.Series(vals1, name="x")
- s2 = pd.Series(vals2, name="y")
+ s1 = Series(vals1, name="x")
+ s2 = Series(vals2, name="y")
res = s1.append(s2, ignore_index=True)
- exp = pd.Series(exp_data)
+ exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
- s1 = pd.Series(vals1, name="x")
- s2 = pd.Series(vals2, name="x")
+ s1 = Series(vals1, name="x")
+ s2 = Series(vals2, name="x")
res = s1.append(s2, ignore_index=True)
- exp = pd.Series(exp_data, name="x")
+ exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
@@ -202,16 +202,16 @@ def test_concatlike_same_dtypes(self):
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
- pd.Series(vals1).append(vals2)
+ Series(vals1).append(vals2)
with pytest.raises(TypeError, match=msg):
- pd.Series(vals1).append([pd.Series(vals2), vals3])
+ Series(vals1).append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
- pd.concat([pd.Series(vals1), vals2])
+ pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
- pd.concat([pd.Series(vals1), pd.Series(vals2), vals3])
+ pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self):
# GH 13660
@@ -265,23 +265,23 @@ def test_concatlike_dtypes_coercion(self):
# ----- Series ----- #
# series.append
- res = pd.Series(vals1).append(pd.Series(vals2), ignore_index=True)
- exp = pd.Series(exp_data, dtype=exp_series_dtype)
+ res = Series(vals1).append(Series(vals2), ignore_index=True)
+ exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
- res = pd.concat([pd.Series(vals1), pd.Series(vals2)], ignore_index=True)
+ res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
- res = pd.Series(vals1).append(
- [pd.Series(vals2), pd.Series(vals3)], ignore_index=True
+ res = Series(vals1).append(
+ [Series(vals2), Series(vals3)], ignore_index=True
)
- exp = pd.Series(exp_data3, dtype=exp_series_dtype)
+ exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
res = pd.concat(
- [pd.Series(vals1), pd.Series(vals2), pd.Series(vals3)],
+ [Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
@@ -306,15 +306,15 @@ def test_concatlike_common_coerce_to_pandas_object(self):
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
- dts = pd.Series(dti)
- tds = pd.Series(tdi)
+ dts = Series(dti)
+ tds = Series(tdi)
res = dts.append(tds)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
@@ -331,13 +331,13 @@ def test_concatlike_datetimetz(self, tz_aware_fixture):
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
- dts1 = pd.Series(dti1)
- dts2 = pd.Series(dti2)
+ dts1 = Series(dti1)
+ dts2 = Series(dti2)
res = dts1.append(dts2)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
@@ -377,13 +377,13 @@ def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
- dts1 = pd.Series(dti1)
- dts2 = pd.Series(dti2)
+ dts1 = Series(dti1)
+ dts2 = Series(dti2)
res = dts1.append(dts2)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
@@ -401,13 +401,13 @@ def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
res = dti1.append(dti3)
# tm.assert_index_equal(res, exp)
- dts1 = pd.Series(dti1)
- dts3 = pd.Series(dti3)
+ dts1 = Series(dti1)
+ dts3 = Series(dti3)
res = dts1.append(dts3)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
@@ -419,13 +419,13 @@ def test_concatlike_common_period(self):
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
- ps1 = pd.Series(pi1)
- ps2 = pd.Series(pi2)
+ ps1 = Series(pi1)
+ ps2 = Series(pi2)
res = ps1.append(ps2)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
@@ -445,13 +445,13 @@ def test_concatlike_common_period_diff_freq_to_object(self):
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
- ps1 = pd.Series(pi1)
- ps2 = pd.Series(pi2)
+ ps1 = Series(pi1)
+ ps2 = Series(pi2)
res = ps1.append(ps2)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
@@ -471,13 +471,13 @@ def test_concatlike_common_period_mixed_dt_to_object(self):
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
- ps1 = pd.Series(pi1)
- tds = pd.Series(tdi)
+ ps1 = Series(pi1)
+ tds = Series(tdi)
res = ps1.append(tds)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = pd.Index(
@@ -493,47 +493,47 @@ def test_concatlike_common_period_mixed_dt_to_object(self):
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
- ps1 = pd.Series(pi1)
- tds = pd.Series(tdi)
+ ps1 = Series(pi1)
+ tds = Series(tdi)
res = tds.append(ps1)
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
- tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
- s1 = pd.Series([1, 2, np.nan], dtype="category")
- s2 = pd.Series([2, 1, 2], dtype="category")
+ s1 = Series([1, 2, np.nan], dtype="category")
+ s2 = Series([2, 1, 2], dtype="category")
- exp = pd.Series([1, 2, np.nan, 2, 1, 2], dtype="category")
+ exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# partially different categories => not-category
- s1 = pd.Series([3, 2], dtype="category")
- s2 = pd.Series([2, 1], dtype="category")
+ s1 = Series([3, 2], dtype="category")
+ s2 = Series([2, 1], dtype="category")
- exp = pd.Series([3, 2, 2, 1])
+ exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
- s1 = pd.Series([10, 11, np.nan], dtype="category")
- s2 = pd.Series([np.nan, 1, 3, 2], dtype="category")
+ s1 = Series([10, 11, np.nan], dtype="category")
+ s2 = Series([np.nan, 1, 3, 2], dtype="category")
- exp = pd.Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype="object")
+ exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
- a = pd.Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
- b = pd.Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
+ a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
+ b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
- expected = pd.Series(
+ expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
@@ -542,63 +542,63 @@ def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
- s1 = pd.Series([1, 2, np.nan], dtype="category")
- s2 = pd.Series([2, 1, 2])
+ s1 = Series([1, 2, np.nan], dtype="category")
+ s2 = Series([2, 1, 2])
- exp = pd.Series([1, 2, np.nan, 2, 1, 2], dtype="object")
+ exp = Series([1, 2, np.nan, 2, 1, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
- exp = pd.Series([2, 1, 2, 1, 2, np.nan], dtype="object")
+ exp = Series([2, 1, 2, 1, 2, np.nan], dtype="object")
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all values are not in category => not-category
- s1 = pd.Series([3, 2], dtype="category")
- s2 = pd.Series([2, 1])
+ s1 = Series([3, 2], dtype="category")
+ s2 = Series([2, 1])
- exp = pd.Series([3, 2, 2, 1])
+ exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
- exp = pd.Series([2, 1, 3, 2])
+ exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# completely different categories => not-category
- s1 = pd.Series([10, 11, np.nan], dtype="category")
- s2 = pd.Series([1, 3, 2])
+ s1 = Series([10, 11, np.nan], dtype="category")
+ s2 = Series([1, 3, 2])
- exp = pd.Series([10, 11, np.nan, 1, 3, 2], dtype="object")
+ exp = Series([10, 11, np.nan, 1, 3, 2], dtype="object")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
- exp = pd.Series([1, 3, 2, 10, 11, np.nan], dtype="object")
+ exp = Series([1, 3, 2, 10, 11, np.nan], dtype="object")
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# different dtype => not-category
- s1 = pd.Series([10, 11, np.nan], dtype="category")
- s2 = pd.Series(["a", "b", "c"])
+ s1 = Series([10, 11, np.nan], dtype="category")
+ s2 = Series(["a", "b", "c"])
- exp = pd.Series([10, 11, np.nan, "a", "b", "c"])
+ exp = Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
- exp = pd.Series(["a", "b", "c", 10, 11, np.nan])
+ exp = Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
- s1 = pd.Series([10, 11], dtype="category")
- s2 = pd.Series([np.nan, np.nan, np.nan])
+ s1 = Series([10, 11], dtype="category")
+ s2 = Series([np.nan, np.nan, np.nan])
- exp = pd.Series([10, 11, np.nan, np.nan, np.nan])
+ exp = Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
- exp = pd.Series([np.nan, np.nan, np.nan, 10, 11])
+ exp = Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
@@ -606,62 +606,62 @@ def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
- s1 = pd.Series([1, 2, np.nan], dtype="category")
- s2 = pd.Series([2, 1, 2], dtype="category")
- s3 = pd.Series([1, 2, 1, 2, np.nan])
+ s1 = Series([1, 2, np.nan], dtype="category")
+ s2 = Series([2, 1, 2], dtype="category")
+ s3 = Series([1, 2, 1, 2, np.nan])
- exp = pd.Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
+ exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
- exp = pd.Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
+ exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
- s1 = pd.Series([4, 5, 6], dtype="category")
- s2 = pd.Series([1, 2, 3], dtype="category")
- s3 = pd.Series([1, 3, 4])
+ s1 = Series([4, 5, 6], dtype="category")
+ s2 = Series([1, 2, 3], dtype="category")
+ s3 = Series([1, 3, 4])
- exp = pd.Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
+ exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
- exp = pd.Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
+ exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
- s1 = pd.Series([4, 5, 6], dtype="category")
- s2 = pd.Series([1, 2, 3], dtype="category")
- s3 = pd.Series([10, 11, 12])
+ s1 = Series([4, 5, 6], dtype="category")
+ s2 = Series([1, 2, 3], dtype="category")
+ s3 = Series([10, 11, 12])
- exp = pd.Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
+ exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
- exp = pd.Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
+ exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
- s1 = pd.Series([1, 3], dtype="category")
- s2 = pd.Series([3, 4], dtype="category")
- s3 = pd.Series([2, 3])
- s4 = pd.Series([2, 2], dtype="category")
- s5 = pd.Series([1, np.nan])
- s6 = pd.Series([1, 3, 2], dtype="category")
+ s1 = Series([1, 3], dtype="category")
+ s2 = Series([3, 4], dtype="category")
+ s3 = Series([2, 3])
+ s4 = Series([2, 2], dtype="category")
+ s5 = Series([1, np.nan])
+ s6 = Series([1, 3, 2], dtype="category")
# mixed dtype, values are all in categories => not-category
- exp = pd.Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
+ exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s1.append([s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
- exp = pd.Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
+ exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s6.append([s5, s4, s3, s2, s1], ignore_index=True)
@@ -670,14 +670,14 @@ def test_concat_categorical_multi_coercion(self):
def test_concat_categorical_ordered(self):
# GH 13524
- s1 = pd.Series(pd.Categorical([1, 2, np.nan], ordered=True))
- s2 = pd.Series(pd.Categorical([2, 1, 2], ordered=True))
+ s1 = Series(pd.Categorical([1, 2, np.nan], ordered=True))
+ s2 = Series(pd.Categorical([2, 1, 2], ordered=True))
- exp = pd.Series(pd.Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
+ exp = Series(pd.Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
- exp = pd.Series(
+ exp = Series(
pd.Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)
)
tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
@@ -688,35 +688,35 @@ def test_concat_categorical_coercion_nan(self):
# some edge cases
# category + not-category => not category
- s1 = pd.Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
- s2 = pd.Series([np.nan, 1])
+ s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
+ s2 = Series([np.nan, 1])
- exp = pd.Series([np.nan, np.nan, np.nan, 1])
+ exp = Series([np.nan, np.nan, np.nan, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
- s1 = pd.Series([1, np.nan], dtype="category")
- s2 = pd.Series([np.nan, np.nan])
+ s1 = Series([1, np.nan], dtype="category")
+ s2 = Series([np.nan, np.nan])
- exp = pd.Series([1, np.nan, np.nan, np.nan], dtype="float")
+ exp = Series([1, np.nan, np.nan, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# mixed dtype, all nan-likes => not-category
- s1 = pd.Series([np.nan, np.nan], dtype="category")
- s2 = pd.Series([np.nan, np.nan])
+ s1 = Series([np.nan, np.nan], dtype="category")
+ s2 = Series([np.nan, np.nan])
- exp = pd.Series([np.nan, np.nan, np.nan, np.nan])
+ exp = Series([np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all category nan-likes => category
- s1 = pd.Series([np.nan, np.nan], dtype="category")
- s2 = pd.Series([np.nan, np.nan], dtype="category")
+ s1 = Series([np.nan, np.nan], dtype="category")
+ s2 = Series([np.nan, np.nan], dtype="category")
- exp = pd.Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
+ exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
@@ -724,8 +724,8 @@ def test_concat_categorical_coercion_nan(self):
def test_concat_categorical_empty(self):
# GH 13524
- s1 = pd.Series([], dtype="category")
- s2 = pd.Series([1, 2], dtype="category")
+ s1 = Series([], dtype="category")
+ s2 = Series([1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
@@ -733,14 +733,14 @@ def test_concat_categorical_empty(self):
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
- s1 = pd.Series([], dtype="category")
- s2 = pd.Series([], dtype="category")
+ s1 = Series([], dtype="category")
+ s2 = Series([], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
- s1 = pd.Series([], dtype="category")
- s2 = pd.Series([], dtype="object")
+ s1 = Series([], dtype="category")
+ s2 = Series([], dtype="object")
# different dtype => not-category
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
@@ -748,11 +748,11 @@ def test_concat_categorical_empty(self):
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
- s1 = pd.Series([], dtype="category")
- s2 = pd.Series([np.nan, np.nan])
+ s1 = Series([], dtype="category")
+ s2 = Series([np.nan, np.nan])
# empty Series is ignored
- exp = pd.Series([np.nan, np.nan])
+ exp = Series([np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
@@ -939,7 +939,7 @@ def test_append_same_columns_type(self, index):
# df wider than ser
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)
ser_index = index[:2]
- ser = pd.Series([7, 8], index=ser_index, name=2)
+ ser = Series([7, 8], index=ser_index, name=2)
result = df.append(ser)
expected = pd.DataFrame(
[[1.0, 2.0, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index
@@ -950,7 +950,7 @@ def test_append_same_columns_type(self, index):
ser_index = index
index = index[:2]
df = pd.DataFrame([[1, 2], [4, 5]], columns=index)
- ser = pd.Series([7, 8, 9], index=ser_index, name=2)
+ ser = Series([7, 8, 9], index=ser_index, name=2)
result = df.append(ser)
expected = pd.DataFrame(
[[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]],
@@ -970,7 +970,7 @@ def test_append_different_columns_types(self, df_columns, series_index):
# for errors raised when appending
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns)
- ser = pd.Series([7, 8, 9], index=series_index, name=2)
+ ser = Series([7, 8, 9], index=series_index, name=2)
result = df.append(ser)
idx_diff = ser.index.difference(df_columns)
@@ -1005,7 +1005,7 @@ def test_append_different_columns_types_raises(
# appending without raising.
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append)
- ser = pd.Series([7, 8, 9], index=index_cannot_append_with_other, name=2)
+ ser = Series([7, 8, 9], index=index_cannot_append_with_other, name=2)
msg = (
r"Expected tuple, got (int|long|float|str|"
r"pandas._libs.interval.Interval)|"
@@ -1018,7 +1018,7 @@ def test_append_different_columns_types_raises(
df = pd.DataFrame(
[[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other
)
- ser = pd.Series([7, 8, 9], index=index_can_append, name=2)
+ ser = Series([7, 8, 9], index=index_can_append, name=2)
with pytest.raises(TypeError, match=msg):
df.append(ser)
@@ -1122,7 +1122,7 @@ def test_append_empty_tz_frame_with_datetime64ns(self):
# also test with typed value to append
df = pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
result = df.append(
- pd.Series({"a": pd.NaT}, dtype="datetime64[ns]"), ignore_index=True
+ Series({"a": pd.NaT}, dtype="datetime64[ns]"), ignore_index=True
)
expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]")
tm.assert_frame_equal(result, expected)
@@ -1986,16 +1986,16 @@ def test_concat_NaT_series(self):
tm.assert_series_equal(result, expected)
# without tz
- x = pd.Series(pd.date_range("20151124 08:00", "20151124 09:00", freq="1h"))
- y = pd.Series(pd.date_range("20151124 10:00", "20151124 11:00", freq="1h"))
+ x = Series(pd.date_range("20151124 08:00", "20151124 09:00", freq="1h"))
+ y = Series(pd.date_range("20151124 10:00", "20151124 11:00", freq="1h"))
y[:] = pd.NaT
- expected = pd.Series([x[0], x[1], pd.NaT, pd.NaT])
+ expected = Series([x[0], x[1], pd.NaT, pd.NaT])
result = pd.concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT without tz
x[:] = pd.NaT
- expected = pd.Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
+ expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
result = pd.concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
@@ -2075,13 +2075,13 @@ def test_concat_tz_series_with_datetimelike(self):
pd.Timestamp("2011-02-01", tz="US/Eastern"),
]
y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
- result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
- tm.assert_series_equal(result, pd.Series(x + y, dtype="object"))
+ result = concat([Series(x), Series(y)], ignore_index=True)
+ tm.assert_series_equal(result, Series(x + y, dtype="object"))
# tz and period
y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
- result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
- tm.assert_series_equal(result, pd.Series(x + y, dtype="object"))
+ result = concat([Series(x), Series(y)], ignore_index=True)
+ tm.assert_series_equal(result, Series(x + y, dtype="object"))
def test_concat_tz_series_tzlocal(self):
# see gh-13583
@@ -2094,8 +2094,8 @@ def test_concat_tz_series_tzlocal(self):
pd.Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
]
- result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
- tm.assert_series_equal(result, pd.Series(x + y))
+ result = concat([Series(x), Series(y)], ignore_index=True)
+ tm.assert_series_equal(result, Series(x + y))
assert result.dtype == "datetime64[ns, tzlocal()]"
@pytest.mark.parametrize("tz1", [None, "UTC"])
@@ -2111,7 +2111,7 @@ def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s):
second = pd.DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2))
result = pd.concat([first, second], axis=0)
- expected = pd.DataFrame(pd.Series([pd.NaT, pd.NaT, s], index=[0, 1, 0]))
+ expected = pd.DataFrame(Series([pd.NaT, pd.NaT, s], index=[0, 1, 0]))
expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
if tz1 != tz2:
expected = expected.astype(object)
@@ -2123,12 +2123,12 @@ def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s):
def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
# GH 12396
- first = pd.DataFrame(pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
- second = pd.DataFrame(pd.Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
+ first = pd.DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
+ second = pd.DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
expected = pd.DataFrame(
{
- 0: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
- 1: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
+ 0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
+ 1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
}
)
result = pd.concat([first, second], axis=1)
@@ -2140,7 +2140,7 @@ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
# GH 12396
# tz-naive
- first = pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
+ first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
second = pd.DataFrame(
[
[pd.Timestamp("2015/01/01", tz=tz2)],
@@ -2225,8 +2225,8 @@ def test_concat_period_other_series(self):
def test_concat_empty_series(self):
# GH 11082
- s1 = pd.Series([1, 2, 3], name="x")
- s2 = pd.Series(name="y", dtype="float64")
+ s1 = Series([1, 2, 3], name="x")
+ s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = pd.DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
@@ -2234,16 +2234,16 @@ def test_concat_empty_series(self):
)
tm.assert_frame_equal(res, exp)
- s1 = pd.Series([1, 2, 3], name="x")
- s2 = pd.Series(name="y", dtype="float64")
+ s1 = Series([1, 2, 3], name="x")
+ s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
- exp = pd.Series([1, 2, 3])
+ exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
- s1 = pd.Series([1, 2, 3], name="x")
- s2 = pd.Series(name=None, dtype="float64")
+ s1 = Series([1, 2, 3], name="x")
+ s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = pd.DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
@@ -2263,7 +2263,7 @@ def test_concat_empty_series_timelike(self, tz, values):
expected = DataFrame(
{
- 0: pd.Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
+ 0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
@@ -2272,8 +2272,8 @@ def test_concat_empty_series_timelike(self, tz, values):
def test_default_index(self):
# is_series and ignore_index
- s1 = pd.Series([1, 2, 3], name="x")
- s2 = pd.Series([4, 5, 6], name="y")
+ s1 = Series([1, 2, 3], name="x")
+ s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])
@@ -2282,8 +2282,8 @@ def test_default_index(self):
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
- s1 = pd.Series([1, 2, 3])
- s2 = pd.Series([4, 5, 6])
+ s1 = Series([1, 2, 3])
+ s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])
@@ -2499,9 +2499,9 @@ def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
- a = pd.Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
- b = pd.Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
- c = pd.Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
+ a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
+ b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
+ c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
@@ -2596,25 +2596,25 @@ def test_concat_datetime_timezone(self):
tm.assert_frame_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
- a = pd.Series(pd.core.arrays.integer_array([1, 2]))
- b = pd.Series(to_decimal([1, 2]))
+ a = Series(pd.core.arrays.integer_array([1, 2]))
+ b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
- expected = pd.Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
+ expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_odered_dict(self):
# GH 21510
expected = pd.concat(
- [pd.Series(range(3)), pd.Series(range(4))], keys=["First", "Another"]
+ [Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
- dict([("First", pd.Series(range(3))), ("Another", pd.Series(range(4)))])
+ dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
-@pytest.mark.parametrize("pdt", [pd.Series, pd.DataFrame])
+@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
@@ -2654,8 +2654,8 @@ def test_concat_empty_and_non_empty_frame_regression():
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
- s1 = pd.Series([1])
- s2 = pd.Series([], dtype=object)
+ s1 = Series([1])
+ s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
@@ -2742,19 +2742,19 @@ def test_concat_aligned_sort_does_not_raise():
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
- s1 = pd.Series({"a": 1, "b": 2}, name=s1name)
- s2 = pd.Series({"c": 5, "d": 6}, name=s2name)
+ s1 = Series({"a": 1, "b": 2}, name=s1name)
+ s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
- expected = pd.Series({"a": 1, "b": 2, "c": 5, "d": 6})
+ expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
- a = pd.Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
- b = pd.Series(["a", "b"], dtype="category")
+ a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
+ b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
- expected = pd.Series(
+ expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
@@ -2769,13 +2769,13 @@ def test_concat_categorical_unchanged():
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
- df = pd.DataFrame(pd.Series(["a", "b", "c"], dtype="category", name="A"))
- ser = pd.Series([0, 1, 2], index=[0, 1, 3], name="B")
+ df = pd.DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
+ ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = pd.DataFrame(
{
- "A": pd.Series(["a", "b", "c", np.nan], dtype="category"),
- "B": pd.Series([0, 1, np.nan, 2], dtype="float"),
+ "A": Series(["a", "b", "c", np.nan], dtype="category"),
+ "B": Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
@@ -2808,7 +2808,7 @@ def test_concat_empty_df_object_dtype():
def test_concat_sparse():
# GH 23557
- a = pd.Series(SparseArray([0, 1, 2]))
+ a = Series(SparseArray([0, 1, 2]))
expected = pd.DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
@@ -2818,9 +2818,9 @@ def test_concat_sparse():
def test_concat_dense_sparse():
# GH 30668
- a = pd.Series(pd.arrays.SparseArray([1, None]), dtype=float)
- b = pd.Series([1], dtype=float)
- expected = pd.Series(data=[1, None, 1], index=[0, 1, 0]).astype(
+ a = Series(pd.arrays.SparseArray([1, None]), dtype=float)
+ b = Series([1], dtype=float)
+ expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(
pd.SparseDtype(np.float64, None)
)
result = pd.concat([a, b], axis=0)
@@ -3196,11 +3196,11 @@ def test_concat_axis_parameter(self):
concatted_1 = pd.concat([df1, df2], axis=1)
tm.assert_frame_equal(concatted_1, expected_columns)
- series1 = pd.Series([0.1, 0.2])
- series2 = pd.Series([0.3, 0.4])
+ series1 = Series([0.1, 0.2])
+ series2 = Series([0.3, 0.4])
# Index/row/0 Series
- expected_index_series = pd.Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
+ expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
concatted_index_series = pd.concat([series1, series2], axis="index")
tm.assert_series_equal(concatted_index_series, expected_index_series)
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 4d2195da85a13..e6091a63b3e97 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -668,9 +668,9 @@ def test_cut_unordered_with_missing_labels_raises_error():
def test_cut_unordered_with_series_labels():
# https://github.com/pandas-dev/pandas/issues/36603
- s = pd.Series([1, 2, 3, 4, 5])
- bins = pd.Series([0, 2, 4, 6])
- labels = pd.Series(["a", "b", "c"])
+ s = Series([1, 2, 3, 4, 5])
+ bins = Series([0, 2, 4, 6])
+ labels = Series(["a", "b", "c"])
result = pd.cut(s, bins=bins, labels=labels, ordered=False)
- expected = pd.Series(["a", "a", "b", "b", "c"], dtype="category")
+ expected = Series(["a", "a", "b", "b", "c"], dtype="category")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 67b3151b0ff9c..943a7d0a3cf86 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -707,7 +707,7 @@ def test_pivot_periods_with_margins(self):
[
["baz", "zoo"],
np.array(["baz", "zoo"]),
- pd.Series(["baz", "zoo"]),
+ Series(["baz", "zoo"]),
pd.Index(["baz", "zoo"]),
],
)
@@ -743,7 +743,7 @@ def test_pivot_with_list_like_values(self, values, method):
[
["bar", "baz"],
np.array(["bar", "baz"]),
- pd.Series(["bar", "baz"]),
+ Series(["bar", "baz"]),
pd.Index(["bar", "baz"]),
],
)
diff --git a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py
index 8918d19e4ba7b..f6a4f8c0cf601 100644
--- a/pandas/tests/reshape/test_union_categoricals.py
+++ b/pandas/tests/reshape/test_union_categoricals.py
@@ -331,7 +331,7 @@ def test_union_categoricals_sort_false(self):
def test_union_categorical_unwrap(self):
# GH 14173
c1 = Categorical(["a", "b"])
- c2 = pd.Series(["b", "c"], dtype="category")
+ c2 = Series(["b", "c"], dtype="category")
result = union_categoricals([c1, c2])
expected = Categorical(["a", "b", "b", "c"])
tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py
index ce8759c4ba76d..61ebd2fcb3a27 100644
--- a/pandas/tests/series/apply/test_series_apply.py
+++ b/pandas/tests/series/apply/test_series_apply.py
@@ -92,56 +92,56 @@ def func(x):
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
- exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
+ exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
- exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
+ exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
- exp = pd.Series(["Timedelta_1", "Timedelta_2"])
+ exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
- exp = pd.Series(["Period_M", "Period_M"])
+ exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
- s = pd.Series(values, name="XX")
+ s = Series(values, name="XX")
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
- exp = pd.Series(exp_values, name="XX")
+ exp = Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
- exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
+ exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
@@ -151,7 +151,7 @@ def f(x):
return str(x.tz)
result = s.map(f)
- exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
+ exp = Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
@@ -167,34 +167,34 @@ def test_apply_dict_depr(self):
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
- ser = pd.Series(values, name="XX", index=list("abcdefg"))
+ ser = Series(values, name="XX", index=list("abcdefg"))
result = ser.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
- exp = pd.Series(values, name="XX", index=list("abcdefg"))
+ exp = Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = ser.apply(lambda x: "A")
- exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
+ exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
@pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
def test_apply_categorical_with_nan_values(self, series):
# GH 20714 bug fixed in: GH 24275
- s = pd.Series(series, dtype="category")
+ s = Series(series, dtype="category")
result = s.apply(lambda x: x.split("-")[0])
result = result.astype(object)
- expected = pd.Series(["1", "1", np.NaN], dtype="category")
+ expected = Series(["1", "1", np.NaN], dtype="category")
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
def test_apply_empty_integer_series_with_datetime_index(self):
# GH 21245
- s = pd.Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
+ s = Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
result = s.apply(lambda x: x)
tm.assert_series_equal(result, s)
@@ -447,9 +447,9 @@ def test_agg_cython_table_raises(self, series, func, expected):
def test_series_apply_no_suffix_index(self):
# GH36189
- s = pd.Series([4] * 3)
+ s = Series([4] * 3)
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
- expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
+ expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
tm.assert_series_equal(result, expected)
@@ -517,7 +517,7 @@ def test_map_empty(self, index):
s = Series(index)
result = s.map({})
- expected = pd.Series(np.nan, index=s.index)
+ expected = Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
@@ -570,7 +570,7 @@ def test_map_dict_with_tuple_keys(self):
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
- df["expected_labels"] = pd.Series(["A", "B", "A", "B"], index=df.index)
+ df["expected_labels"] = Series(["A", "B", "A", "B"], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
@@ -651,53 +651,53 @@ def __missing__(self, key):
def test_map_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
- exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
+ exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
- exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
+ exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
- exp = pd.Series(["Timedelta_1", "Timedelta_2"])
+ exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
- s = pd.Series(vals)
+ s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
- exp = pd.Series(["Period_M", "Period_M"])
+ exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
- s = pd.Series(values, name="XX", index=list("abcdefg"))
+ s = Series(values, name="XX", index=list("abcdefg"))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(
list("abbabcd"), categories=list("dcba"), ordered=True
)
- exp = pd.Series(exp_values, name="XX", index=list("abcdefg"))
+ exp = Series(exp_values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: "A")
- exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
+ exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
@@ -708,20 +708,20 @@ def test_map_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
- s = pd.Series(values, name="XX")
+ s = Series(values, name="XX")
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
- exp = pd.Series(exp_values, name="XX")
+ exp = Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
- exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
+ exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
@@ -734,7 +734,7 @@ def f(x):
return str(x.tz)
result = s.map(f)
- exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
+ exp = Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
@@ -747,10 +747,10 @@ def f(x):
)
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
- s = pd.Series(vals + [np.nan])
+ s = Series(vals + [np.nan])
result = s.map(mapping)
- tm.assert_series_equal(result, pd.Series(exp))
+ tm.assert_series_equal(result, Series(exp))
@pytest.mark.parametrize(
"dti,exp",
@@ -769,26 +769,26 @@ def test_apply_series_on_date_time_index_aware_series(self, dti, exp):
# GH 25959
# Calling apply on a localized time series should not cause an error
index = dti.tz_localize("UTC").index
- result = pd.Series(index).apply(lambda x: pd.Series([1, 2]))
+ result = Series(index).apply(lambda x: Series([1, 2]))
tm.assert_frame_equal(result, exp)
def test_apply_scaler_on_date_time_index_aware_series(self):
# GH 25959
# Calling apply on a localized time series should not cause an error
series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
- result = pd.Series(series.index).apply(lambda x: 1)
- tm.assert_series_equal(result, pd.Series(np.ones(30), dtype="int64"))
+ result = Series(series.index).apply(lambda x: 1)
+ tm.assert_series_equal(result, Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
# GH 13228
- ser = pd.Series(1 / 3)
+ ser = Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_map_with_invalid_na_action_raises(self):
# https://github.com/pandas-dev/pandas/issues/32815
- s = pd.Series([1, 2, 3])
+ s = Series([1, 2, 3])
msg = "na_action must either be 'ignore' or None"
with pytest.raises(ValueError, match=msg):
s.map(lambda x: x, na_action="____")
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 1801d13e75565..2e3d67786afdc 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -552,7 +552,7 @@ def test_indexing_over_size_cutoff_period_index(monkeypatch):
idx = pd.period_range("1/1/2000", freq="T", periods=n)
assert idx._engine.over_size_threshold
- s = pd.Series(np.random.randn(len(idx)), index=idx)
+ s = Series(np.random.randn(len(idx)), index=idx)
pos = n - 1
timestamp = idx[pos]
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 5b585e8802752..06c14a95ab04e 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -90,7 +90,7 @@ def test_getitem_intlist_intindex_periodvalues(self):
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
result = ser[[2, 4]]
- exp = pd.Series(
+ exp = Series(
[pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")],
index=[2, 4],
dtype="Period[D]",
@@ -134,6 +134,6 @@ def test_getitem_generator(string_series):
def test_getitem_ndim_deprecated():
- s = pd.Series([0, 1])
+ s = Series([0, 1])
with tm.assert_produces_warning(FutureWarning):
s[:, None]
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index fbdac2bb2d8e8..3d927a80a157c 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -132,7 +132,7 @@ def test_getitem_fancy(string_series, object_series):
def test_type_promotion():
# GH12599
- s = pd.Series(dtype=object)
+ s = Series(dtype=object)
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
@@ -144,14 +144,14 @@ def test_type_promotion():
"result_1, duplicate_item, expected_1",
[
[
- pd.Series({1: 12, 2: [1, 2, 2, 3]}),
- pd.Series({1: 313}),
- pd.Series({1: 12}, dtype=object),
+ Series({1: 12, 2: [1, 2, 2, 3]}),
+ Series({1: 313}),
+ Series({1: 12}, dtype=object),
],
[
- pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
- pd.Series({1: [1, 2, 3]}),
- pd.Series({1: [1, 2, 3]}),
+ Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
+ Series({1: [1, 2, 3]}),
+ Series({1: [1, 2, 3]}),
],
],
)
@@ -205,7 +205,7 @@ def test_series_box_timestamp():
def test_series_box_timedelta():
rng = pd.timedelta_range("1 day 1 s", periods=5, freq="h")
- ser = pd.Series(rng)
+ ser = Series(rng)
assert isinstance(ser[0], Timedelta)
assert isinstance(ser.at[1], Timedelta)
assert isinstance(ser.iat[2], Timedelta)
@@ -262,7 +262,7 @@ def test_setitem_ambiguous_keyerror():
def test_getitem_dataframe():
rng = list(range(10))
- s = pd.Series(10, index=rng)
+ s = Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
msg = (
"Indexing a Series with DataFrame is not supported, "
@@ -299,15 +299,15 @@ def test_setitem(datetime_series, string_series):
def test_setitem_empty_series():
# Test for issue #10193
key = pd.Timestamp("2012-01-01")
- series = pd.Series(dtype=object)
+ series = Series(dtype=object)
series[key] = 47
- expected = pd.Series(47, [key])
+ expected = Series(47, [key])
tm.assert_series_equal(series, expected)
# GH#33573 our index should retain its freq
- series = pd.Series([], pd.DatetimeIndex([], freq="D"), dtype=object)
+ series = Series([], pd.DatetimeIndex([], freq="D"), dtype=object)
series[key] = 47
- expected = pd.Series(47, pd.DatetimeIndex([key], freq="D"))
+ expected = Series(47, pd.DatetimeIndex([key], freq="D"))
tm.assert_series_equal(series, expected)
assert series.index.freq == expected.index.freq
@@ -365,7 +365,7 @@ def test_setslice(datetime_series):
def test_2d_to_1d_assignment_raises():
x = np.random.randn(2, 2)
- y = pd.Series(range(2))
+ y = Series(range(2))
msg = "|".join(
[
@@ -409,13 +409,13 @@ def test_basic_getitem_setitem_corner(datetime_series):
@pytest.mark.parametrize("tz", ["US/Eastern", "UTC", "Asia/Tokyo"])
def test_setitem_with_tz(tz):
- orig = pd.Series(pd.date_range("2016-01-01", freq="H", periods=3, tz=tz))
+ orig = Series(pd.date_range("2016-01-01", freq="H", periods=3, tz=tz))
assert orig.dtype == f"datetime64[ns, {tz}]"
# scalar
s = orig.copy()
s[1] = pd.Timestamp("2011-01-01", tz=tz)
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2016-01-01 00:00", tz=tz),
pd.Timestamp("2011-01-01 00:00", tz=tz),
@@ -433,14 +433,14 @@ def test_setitem_with_tz(tz):
tm.assert_series_equal(s, exp)
# vector
- vals = pd.Series(
+ vals = Series(
[pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
)
assert vals.dtype == f"datetime64[ns, {tz}]"
s[[1, 2]] = vals
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2016-01-01 00:00", tz=tz),
pd.Timestamp("2011-01-01 00:00", tz=tz),
@@ -461,13 +461,13 @@ def test_setitem_with_tz(tz):
def test_setitem_with_tz_dst():
# GH XXX TODO: fill in GH ref
tz = "US/Eastern"
- orig = pd.Series(pd.date_range("2016-11-06", freq="H", periods=3, tz=tz))
+ orig = Series(pd.date_range("2016-11-06", freq="H", periods=3, tz=tz))
assert orig.dtype == f"datetime64[ns, {tz}]"
# scalar
s = orig.copy()
s[1] = pd.Timestamp("2011-01-01", tz=tz)
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2016-11-06 00:00-04:00", tz=tz),
pd.Timestamp("2011-01-01 00:00-05:00", tz=tz),
@@ -485,14 +485,14 @@ def test_setitem_with_tz_dst():
tm.assert_series_equal(s, exp)
# vector
- vals = pd.Series(
+ vals = Series(
[pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
)
assert vals.dtype == f"datetime64[ns, {tz}]"
s[[1, 2]] = vals
- exp = pd.Series(
+ exp = Series(
[
pd.Timestamp("2016-11-06 00:00", tz=tz),
pd.Timestamp("2011-01-01 00:00", tz=tz),
@@ -547,7 +547,7 @@ def test_categorical_assigning_ops():
def test_getitem_categorical_str():
# GH#31765
- ser = pd.Series(range(5), index=pd.Categorical(["a", "b", "c", "a", "b"]))
+ ser = Series(range(5), index=pd.Categorical(["a", "b", "c", "a", "b"]))
result = ser["a"]
expected = ser.iloc[[0, 3]]
tm.assert_series_equal(result, expected)
@@ -646,7 +646,7 @@ def test_timedelta_assignment():
# GH 14155
s = Series(10 * [np.timedelta64(10, "m")])
s.loc[[1, 2, 3]] = np.timedelta64(20, "m")
- expected = pd.Series(10 * [np.timedelta64(10, "m")])
+ expected = Series(10 * [np.timedelta64(10, "m")])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, "m"))
tm.assert_series_equal(s, expected)
@@ -665,8 +665,8 @@ def test_dt64_series_assign_nat(nat_val, should_cast, tz):
# into a datetime64 series. Others should coerce to object
# and retain their dtypes.
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
- base = pd.Series(dti)
- expected = pd.Series([pd.NaT] + list(dti[1:]), dtype=dti.dtype)
+ base = Series(dti)
+ expected = Series([pd.NaT] + list(dti[1:]), dtype=dti.dtype)
if not should_cast:
expected = expected.astype(object)
@@ -695,8 +695,8 @@ def test_td64_series_assign_nat(nat_val, should_cast):
# some nat-like values should be cast to timedelta64 when inserting
# into a timedelta64 series. Others should coerce to object
# and retain their dtypes.
- base = pd.Series([0, 1, 2], dtype="m8[ns]")
- expected = pd.Series([pd.NaT, 1, 2], dtype="m8[ns]")
+ base = Series([0, 1, 2], dtype="m8[ns]")
+ expected = Series([pd.NaT, 1, 2], dtype="m8[ns]")
if not should_cast:
expected = expected.astype(object)
@@ -723,14 +723,14 @@ def test_td64_series_assign_nat(nat_val, should_cast):
)
def test_append_timedelta_does_not_cast(td):
# GH#22717 inserting a Timedelta should _not_ cast to int64
- expected = pd.Series(["x", td], index=[0, "td"], dtype=object)
+ expected = Series(["x", td], index=[0, "td"], dtype=object)
- ser = pd.Series(["x"])
+ ser = Series(["x"])
ser["td"] = td
tm.assert_series_equal(ser, expected)
assert isinstance(ser["td"], pd.Timedelta)
- ser = pd.Series(["x"])
+ ser = Series(["x"])
ser.loc["td"] = pd.Timedelta("9 days")
tm.assert_series_equal(ser, expected)
assert isinstance(ser["td"], pd.Timedelta)
@@ -771,7 +771,7 @@ def test_underlying_data_conversion():
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df["c"] = np.nan
- df["c"].update(pd.Series(["foo"], index=[0]))
+ df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=["foo", np.nan]))
tm.assert_frame_equal(df, expected)
@@ -878,9 +878,9 @@ def test_pop():
def test_uint_drop(any_int_dtype):
# see GH18311
# assigning series.loc[0] = 4 changed series.dtype to int
- series = pd.Series([1, 2, 3], dtype=any_int_dtype)
+ series = Series([1, 2, 3], dtype=any_int_dtype)
series.loc[0] = 4
- expected = pd.Series([4, 2, 3], dtype=any_int_dtype)
+ expected = Series([4, 2, 3], dtype=any_int_dtype)
tm.assert_series_equal(series, expected)
@@ -888,7 +888,7 @@ def test_getitem_unrecognized_scalar():
# GH#32684 a scalar key that is not recognized by lib.is_scalar
# a series that might be produced via `frame.dtypes`
- ser = pd.Series([1, 2], index=[np.dtype("O"), np.dtype("i8")])
+ ser = Series([1, 2], index=[np.dtype("O"), np.dtype("i8")])
key = ser.index[1]
@@ -949,7 +949,7 @@ def assert_slices_equivalent(l_slc, i_slc):
def test_tuple_index():
# GH 35534 - Selecting values when a Series has an Index of tuples
- s = pd.Series([1, 2], index=[("a",), ("b",)])
+ s = Series([1, 2], index=[("a",), ("b",)])
assert s[("a",)] == 1
assert s[("b",)] == 2
s[("b",)] = 3
@@ -959,7 +959,7 @@ def test_tuple_index():
def test_frozenset_index():
# GH35747 - Selecting values when a Series has an Index of frozenset
idx0, idx1 = frozenset("a"), frozenset("b")
- s = pd.Series([1, 2], index=[idx0, idx1])
+ s = Series([1, 2], index=[idx0, idx1])
assert s[idx0] == 1
assert s[idx1] == 2
s[idx1] = 3
diff --git a/pandas/tests/series/indexing/test_multiindex.py b/pandas/tests/series/indexing/test_multiindex.py
index ed8bc52db7a9e..ff9db275ff2df 100644
--- a/pandas/tests/series/indexing/test_multiindex.py
+++ b/pandas/tests/series/indexing/test_multiindex.py
@@ -54,7 +54,7 @@ def test_nat_multi_index(ix_data, exp_data):
def test_loc_getitem_multiindex_nonunique_len_zero():
# GH#13691
mi = pd.MultiIndex.from_product([[0], [1, 1]])
- ser = pd.Series(0, index=mi)
+ ser = Series(0, index=mi)
res = ser.loc[[]]
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index cbb34d595eb9b..404136bdfa2db 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -349,7 +349,7 @@ def test_where_dups():
def test_where_numeric_with_string():
# GH 9280
- s = pd.Series([1, 2, 3])
+ s = Series([1, 2, 3])
w = s.where(s > 1, "X")
assert not is_integer(w[0])
@@ -425,15 +425,15 @@ def test_where_datetime_conversion():
def test_where_dt_tz_values(tz_naive_fixture):
- ser1 = pd.Series(
+ ser1 = Series(
pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
)
- ser2 = pd.Series(
+ ser2 = Series(
pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
)
- mask = pd.Series([True, True, False])
+ mask = Series([True, True, False])
result = ser1.where(mask, ser2)
- exp = pd.Series(
+ exp = Series(
pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
)
tm.assert_series_equal(exp, result)
@@ -441,9 +441,9 @@ def test_where_dt_tz_values(tz_naive_fixture):
def test_where_sparse():
# GH#17198 make sure we dont get an AttributeError for sp_index
- ser = pd.Series(pd.arrays.SparseArray([1, 2]))
+ ser = Series(pd.arrays.SparseArray([1, 2]))
result = ser.where(ser >= 2, 0)
- expected = pd.Series(pd.arrays.SparseArray([0, 2]))
+ expected = Series(pd.arrays.SparseArray([0, 2]))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_align.py b/pandas/tests/series/methods/test_align.py
index 974ba5d1e35a7..ef2b07d592b95 100644
--- a/pandas/tests/series/methods/test_align.py
+++ b/pandas/tests/series/methods/test_align.py
@@ -124,8 +124,8 @@ def test_align_multiindex():
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
- s1 = pd.Series(np.arange(12, dtype="int64"), index=midx)
- s2 = pd.Series(np.arange(2, dtype="int64"), index=idx)
+ s1 = Series(np.arange(12, dtype="int64"), index=midx)
+ s2 = Series(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join="left")
@@ -134,7 +134,7 @@ def test_align_multiindex():
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
- expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
+ expr = Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
@@ -144,10 +144,10 @@ def test_align_multiindex():
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
- expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
+ expl = Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
- expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
+ expr = Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
@@ -155,7 +155,7 @@ def test_align_multiindex():
@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])
def test_align_with_dataframe_method(method):
# GH31788
- ser = pd.Series(range(3), index=range(3))
+ ser = Series(range(3), index=range(3))
df = pd.DataFrame(0.0, index=range(3), columns=range(3))
result_ser, result_df = ser.align(df, method=method)
diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py
index 82bde7c233626..e1d0bced55d98 100644
--- a/pandas/tests/series/methods/test_append.py
+++ b/pandas/tests/series/methods/test_append.py
@@ -29,14 +29,14 @@ def test_append_many(self, datetime_series):
def test_append_duplicates(self):
# GH 13677
- s1 = pd.Series([1, 2, 3])
- s2 = pd.Series([4, 5, 6])
- exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])
+ s1 = Series([1, 2, 3])
+ s2 = Series([4, 5, 6])
+ exp = Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(s1.append(s2), exp)
tm.assert_series_equal(pd.concat([s1, s2]), exp)
# the result must have RangeIndex
- exp = pd.Series([1, 2, 3, 4, 5, 6])
+ exp = Series([1, 2, 3, 4, 5, 6])
tm.assert_series_equal(
s1.append(s2, ignore_index=True), exp, check_index_type=True
)
@@ -52,7 +52,7 @@ def test_append_duplicates(self):
def test_append_tuples(self):
# GH 28410
- s = pd.Series([1, 2, 3])
+ s = Series([1, 2, 3])
list_input = [s, s]
tuple_input = (s, s)
diff --git a/pandas/tests/series/methods/test_clip.py b/pandas/tests/series/methods/test_clip.py
index 37764d3b82c2d..5a5a397222b87 100644
--- a/pandas/tests/series/methods/test_clip.py
+++ b/pandas/tests/series/methods/test_clip.py
@@ -62,9 +62,9 @@ def test_clip_against_series(self):
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH#15390
- original = pd.Series([5, 6, 7])
+ original = Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
- expected = pd.Series([1, 2, 3])
+ expected = Series([1, 2, 3])
if inplace:
result = original
diff --git a/pandas/tests/series/methods/test_combine_first.py b/pandas/tests/series/methods/test_combine_first.py
index 1ee55fbe39513..2a02406f50750 100644
--- a/pandas/tests/series/methods/test_combine_first.py
+++ b/pandas/tests/series/methods/test_combine_first.py
@@ -76,11 +76,11 @@ def test_combine_first_dt64(self):
tm.assert_series_equal(rs, xp)
def test_combine_first_dt_tz_values(self, tz_naive_fixture):
- ser1 = pd.Series(
+ ser1 = Series(
pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),
name="ser1",
)
- ser2 = pd.Series(
+ ser2 = Series(
pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture),
index=[2, 3, 4],
name="ser2",
@@ -90,5 +90,5 @@ def test_combine_first_dt_tz_values(self, tz_naive_fixture):
["20150101", "20150102", "20150103", "20160515", "20160516"],
tz=tz_naive_fixture,
)
- exp = pd.Series(exp_vals, name="ser1")
+ exp = Series(exp_vals, name="ser1")
tm.assert_series_equal(exp, result)
diff --git a/pandas/tests/series/methods/test_count.py b/pandas/tests/series/methods/test_count.py
index 19290b6a5c23f..3c5957706b144 100644
--- a/pandas/tests/series/methods/test_count.py
+++ b/pandas/tests/series/methods/test_count.py
@@ -8,7 +8,7 @@
class TestSeriesCount:
def test_count_level_without_multiindex(self):
- ser = pd.Series(range(3))
+ ser = Series(range(3))
msg = "Series.count level is only valid with a MultiIndex"
with pytest.raises(ValueError, match=msg):
@@ -33,7 +33,7 @@ def test_count(self, datetime_series):
# GH#29478
with pd.option_context("use_inf_as_na", True):
- assert pd.Series([pd.Timestamp("1990/1/1")]).count() == 1
+ assert Series([pd.Timestamp("1990/1/1")]).count() == 1
def test_count_categorical(self):
diff --git a/pandas/tests/series/methods/test_cov_corr.py b/pandas/tests/series/methods/test_cov_corr.py
index 282f499506aae..f01ed73c0165f 100644
--- a/pandas/tests/series/methods/test_cov_corr.py
+++ b/pandas/tests/series/methods/test_cov_corr.py
@@ -135,8 +135,8 @@ def test_corr_rank(self):
def test_corr_invalid_method(self):
# GH PR #22298
- s1 = pd.Series(np.random.randn(10))
- s2 = pd.Series(np.random.randn(10))
+ s1 = Series(np.random.randn(10))
+ s2 = Series(np.random.randn(10))
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
with pytest.raises(ValueError, match=msg):
s1.corr(s2, method="____")
diff --git a/pandas/tests/series/methods/test_drop.py b/pandas/tests/series/methods/test_drop.py
index 197fe9ff68df2..7ded8ac902d78 100644
--- a/pandas/tests/series/methods/test_drop.py
+++ b/pandas/tests/series/methods/test_drop.py
@@ -1,6 +1,5 @@
import pytest
-import pandas as pd
from pandas import Series
import pandas._testing as tm
@@ -66,8 +65,8 @@ def test_drop_with_ignore_errors():
def test_drop_empty_list(index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
- series = pd.Series(index=index, dtype=object).drop(drop_labels)
- expected = pd.Series(index=expected_index, dtype=object)
+ series = Series(index=index, dtype=object).drop(drop_labels)
+ expected = Series(index=expected_index, dtype=object)
tm.assert_series_equal(series, expected)
@@ -82,6 +81,6 @@ def test_drop_empty_list(index, drop_labels):
def test_drop_non_empty_list(data, index, drop_labels):
# GH 21494 and GH 16877
dtype = object if data is None else None
- ser = pd.Series(data=data, index=index, dtype=dtype)
+ ser = Series(data=data, index=index, dtype=dtype)
with pytest.raises(KeyError, match="not found in axis"):
ser.drop(drop_labels)
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 9fc468221ee2d..1b05f72f5cf4d 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -314,14 +314,14 @@ def test_interp_limit(self):
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
- s = pd.Series([1, 2, np.nan, 4])
+ s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
- s = pd.Series([1, 2, np.nan, 4])
+ s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
@@ -561,17 +561,17 @@ def test_interp_datetime64(self, method, tz_naive_fixture):
def test_interp_pad_datetime64tz_values(self):
# GH#27628 missing.interpolate_2d should handle datetimetz values
dti = pd.date_range("2015-04-05", periods=3, tz="US/Central")
- ser = pd.Series(dti)
+ ser = Series(dti)
ser[1] = pd.NaT
result = ser.interpolate(method="pad")
- expected = pd.Series(dti)
+ expected = Series(dti)
expected[1] = expected[0]
tm.assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
- s = pd.Series([1.0, 2.0, 3.0])
+ s = Series([1.0, 2.0, 3.0])
result = s.interpolate(limit=1)
expected = s
tm.assert_series_equal(result, expected)
@@ -654,13 +654,13 @@ def test_series_interpolate_method_values(self):
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range("1/1/2012", periods=4, freq="12D")
- ts = pd.Series([0, 12, 24, 36], index)
+ ts = Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method="time")
index = pd.date_range("1/1/2012", periods=4, freq="12H")
- ts = pd.Series([0, 12, 24, 36], index)
+ ts = Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method="time")
@@ -684,7 +684,7 @@ def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
if method == "linear":
result = df[0].interpolate(**kwargs)
- expected = pd.Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
+ expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
tm.assert_series_equal(result, expected)
else:
expected_error = (
@@ -712,7 +712,7 @@ def test_interpolate_timedelta_index(self, interp_methods_ind):
if method in {"linear", "pchip"}:
result = df[0].interpolate(method=method, **kwargs)
- expected = pd.Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
+ expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
tm.assert_series_equal(result, expected)
else:
pytest.skip(
@@ -725,7 +725,7 @@ def test_interpolate_timedelta_index(self, interp_methods_ind):
)
def test_interpolate_unsorted_index(self, ascending, expected_values):
# GH 21037
- ts = pd.Series(data=[10, 9, np.nan, 2, 1], index=[10, 9, 3, 2, 1])
+ ts = Series(data=[10, 9, np.nan, 2, 1], index=[10, 9, 3, 2, 1])
result = ts.sort_index(ascending=ascending).interpolate(method="index")
- expected = pd.Series(data=expected_values, index=expected_values, dtype=float)
+ expected = Series(data=expected_values, index=expected_values, dtype=float)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_quantile.py b/pandas/tests/series/methods/test_quantile.py
index 79f50afca658f..964d62602edaa 100644
--- a/pandas/tests/series/methods/test_quantile.py
+++ b/pandas/tests/series/methods/test_quantile.py
@@ -45,7 +45,7 @@ def test_quantile_multi(self, datetime_series):
qs = [0.1, 0.9]
result = datetime_series.quantile(qs)
- expected = pd.Series(
+ expected = Series(
[
np.percentile(datetime_series.dropna(), 10),
np.percentile(datetime_series.dropna(), 90),
@@ -66,7 +66,7 @@ def test_quantile_multi(self, datetime_series):
tm.assert_series_equal(result, expected)
result = datetime_series.quantile([])
- expected = pd.Series(
+ expected = Series(
[], name=datetime_series.name, index=Index([], dtype=float), dtype="float64"
)
tm.assert_series_equal(result, expected)
@@ -87,18 +87,18 @@ def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
- q = pd.Series([1, 3, 4]).quantile(0.5, interpolation="lower")
+ q = Series([1, 3, 4]).quantile(0.5, interpolation="lower")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
- q = pd.Series([1, 3, 4]).quantile(0.5, interpolation="higher")
+ q = Series([1, 3, 4]).quantile(0.5, interpolation="higher")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
def test_quantile_nan(self):
# GH 13098
- s = pd.Series([1, 2, 3, 4, np.nan])
+ s = Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
@@ -112,10 +112,10 @@ def test_quantile_nan(self):
assert np.isnan(res)
res = s.quantile([0.5])
- tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
+ tm.assert_series_equal(res, Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
- tm.assert_series_equal(res, pd.Series([np.nan, np.nan], index=[0.2, 0.3]))
+ tm.assert_series_equal(res, Series([np.nan, np.nan], index=[0.2, 0.3]))
@pytest.mark.parametrize(
"case",
@@ -153,12 +153,12 @@ def test_quantile_nan(self):
],
)
def test_quantile_box(self, case):
- s = pd.Series(case, name="XXX")
+ s = Series(case, name="XXX")
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
- exp = pd.Series([case[1]], index=[0.5], name="XXX")
+ exp = Series([case[1]], index=[0.5], name="XXX")
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
@@ -171,16 +171,16 @@ def test_quantile_nat(self):
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
- tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
+ tm.assert_series_equal(res, Series([pd.NaT], index=[0.5]))
@pytest.mark.parametrize(
"values, dtype",
[([0, 0, 0, 1, 2, 3], "Sparse[int]"), ([0.0, None, 1.0, 2.0], "Sparse[float]")],
)
def test_quantile_sparse(self, values, dtype):
- ser = pd.Series(values, dtype=dtype)
+ ser = Series(values, dtype=dtype)
result = ser.quantile([0.5])
- expected = pd.Series(np.asarray(ser)).quantile([0.5])
+ expected = Series(np.asarray(ser)).quantile([0.5])
tm.assert_series_equal(result, expected)
def test_quantile_empty(self):
diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py
index da6407c73104c..20bb3e008c792 100644
--- a/pandas/tests/series/methods/test_shift.py
+++ b/pandas/tests/series/methods/test_shift.py
@@ -135,14 +135,14 @@ def test_shift_fill_value(self):
result = ts.shift(2, fill_value=0.0)
tm.assert_series_equal(result, exp)
- ts = pd.Series([1, 2, 3])
+ ts = Series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert res.dtype == ts.dtype
def test_shift_categorical_fill_value(self):
- ts = pd.Series(["a", "b", "c", "d"], dtype="category")
+ ts = Series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
- expected = pd.Series(
+ expected = Series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
@@ -302,7 +302,7 @@ def test_shift_object_non_scalar_fill(self):
def test_shift_categorical(self):
# GH#9416
- s = pd.Series(["a", "b", "c", "d"], dtype="category")
+ s = Series(["a", "b", "c", "d"], dtype="category")
tm.assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
@@ -321,25 +321,25 @@ def test_shift_categorical(self):
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
- ser = pd.Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
+ ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
with tm.assert_produces_warning(FutureWarning):
result = ser.shift(1, fill_value=0)
- expected = pd.Series([pd.Timestamp(0), ser[0]])
+ expected = Series([pd.Timestamp(0), ser[0]])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods):
# GH#21275
- ser = pd.Series(
+ ser = Series(
range(periods),
index=pd.date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = ser.shift(1, "2H")
- expected = pd.Series(
+ expected = Series(
range(periods),
index=pd.date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
@@ -353,7 +353,7 @@ def test_shift_non_writable_array(self, input_data, output_data):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
- result = pd.Series(input_data).shift(1)
- expected = pd.Series(output_data, dtype="float64")
+ result = Series(input_data).shift(1)
+ expected = Series(output_data, dtype="float64")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
index 858b1d6b4df8c..b03f516eeffc5 100644
--- a/pandas/tests/series/methods/test_truncate.py
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -67,14 +67,14 @@ def test_truncate(self, datetime_series):
def test_truncate_nonsortedindex(self):
# GH#17935
- s = pd.Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
+ s = Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
s.truncate(before=3, after=9)
rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
- ts = pd.Series(np.random.randn(len(rng)), index=rng)
+ ts = Series(np.random.randn(len(rng)), index=rng)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
@@ -92,7 +92,7 @@ def test_truncate_decreasing_index(self, before, after, indices, klass):
before = pd.Timestamp(before) if before is not None else None
after = pd.Timestamp(after) if after is not None else None
indices = [pd.Timestamp(i) for i in indices]
- values = pd.Series(range(len(idx)), index=idx)
+ values = Series(range(len(idx)), index=idx)
result = values.truncate(before=before, after=after)
expected = values.loc[indices]
tm.assert_series_equal(result, expected)
@@ -116,27 +116,27 @@ def test_truncate_periodindex(self):
idx1 = pd.PeriodIndex(
[pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
)
- series1 = pd.Series([1, 2, 3], index=idx1)
+ series1 = Series([1, 2, 3], index=idx1)
result1 = series1.truncate(after="2017-09-02")
expected_idx1 = pd.PeriodIndex(
[pd.Period("2017-09-02"), pd.Period("2017-09-02")]
)
- tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1))
+ tm.assert_series_equal(result1, Series([1, 2], index=expected_idx1))
idx2 = pd.PeriodIndex(
[pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
)
- series2 = pd.Series([1, 2, 3], index=idx2)
+ series2 = Series([1, 2, 3], index=idx2)
result2 = series2.sort_index().truncate(after="2017-09-02")
expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")])
- tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2))
+ tm.assert_series_equal(result2, Series([2], index=expected_idx2))
def test_truncate_multiindex(self):
# GH 34564
mi = pd.MultiIndex.from_product([[1, 2, 3, 4], ["A", "B"]], names=["L1", "L2"])
- s1 = pd.Series(range(mi.shape[0]), index=mi, name="col")
+ s1 = Series(range(mi.shape[0]), index=mi, name="col")
result = s1.truncate(before=2, after=3)
df = pd.DataFrame.from_dict(
@@ -150,7 +150,7 @@ def test_truncate_multiindex(self):
def test_truncate_one_element_series(self):
# GH 35544
- series = pd.Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
+ series = Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
before = pd.Timestamp("2020-08-02")
after = pd.Timestamp("2020-08-04")
diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py
index d651315d64561..d8099e84a324d 100644
--- a/pandas/tests/series/methods/test_unstack.py
+++ b/pandas/tests/series/methods/test_unstack.py
@@ -41,7 +41,7 @@ def test_unstack():
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
- ts = pd.Series([1, 2], index=idx)
+ ts = Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame(
[[np.nan, 1], [2, np.nan]], index=[101, 102], columns=[np.nan, 3.5]
@@ -55,7 +55,7 @@ def test_unstack():
[1, 2, 1, 1, np.nan],
]
)
- ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
+ ts = Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame(
[[1.0, 1.3], [1.1, np.nan], [np.nan, 1.4], [1.2, np.nan]],
columns=["cat", "dog"],
@@ -70,7 +70,7 @@ def test_unstack_tuplename_in_multiindex():
idx = pd.MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
- ser = pd.Series(1, index=idx)
+ ser = Series(1, index=idx)
result = ser.unstack(("A", "a"))
expected = pd.DataFrame(
@@ -109,7 +109,7 @@ def test_unstack_mixed_type_name_in_multiindex(
idx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
- ser = pd.Series(1, index=idx)
+ ser = Series(1, index=idx)
result = ser.unstack(unstack_idx)
expected = pd.DataFrame(
@@ -121,7 +121,7 @@ def test_unstack_mixed_type_name_in_multiindex(
def test_unstack_multi_index_categorical_values():
mi = tm.makeTimeDataFrame().stack().index.rename(["major", "minor"])
- ser = pd.Series(["foo"] * len(mi), index=mi, name="category", dtype="category")
+ ser = Series(["foo"] * len(mi), index=mi, name="category", dtype="category")
result = ser.unstack()
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
index f97362ce9c2a9..37da31fb2329a 100644
--- a/pandas/tests/series/methods/test_value_counts.py
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -21,16 +21,16 @@ def test_value_counts_datetime(self):
exp_idx = pd.DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"]
)
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+ exp = Series([3, 2, 1], index=exp_idx, name="xxx")
- ser = pd.Series(values, name="xxx")
+ ser = Series(values, name="xxx")
tm.assert_series_equal(ser.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ exp = Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(ser.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@@ -48,14 +48,14 @@ def test_value_counts_datetime_tz(self):
["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"],
tz="US/Eastern",
)
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+ exp = Series([3, 2, 1], index=exp_idx, name="xxx")
- ser = pd.Series(values, name="xxx")
+ ser = Series(values, name="xxx")
tm.assert_series_equal(ser.value_counts(), exp)
idx = pd.DatetimeIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ exp = Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(ser.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@@ -70,16 +70,16 @@ def test_value_counts_period(self):
]
exp_idx = pd.PeriodIndex(["2011-01", "2011-03", "2011-02"], freq="M")
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+ exp = Series([3, 2, 1], index=exp_idx, name="xxx")
- ser = pd.Series(values, name="xxx")
+ ser = Series(values, name="xxx")
tm.assert_series_equal(ser.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.PeriodIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ exp = Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(ser.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@@ -88,16 +88,16 @@ def test_value_counts_categorical_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=True)
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+ exp = Series([3, 2, 1], index=exp_idx, name="xxx")
- ser = pd.Series(values, name="xxx")
+ ser = Series(values, name="xxx")
tm.assert_series_equal(ser.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ exp = Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(ser.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@@ -105,16 +105,16 @@ def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=False)
- exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
+ exp = Series([3, 2, 1], index=exp_idx, name="xxx")
- ser = pd.Series(values, name="xxx")
+ ser = Series(values, name="xxx")
tm.assert_series_equal(ser.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
- exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
+ exp = Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(ser.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@@ -183,19 +183,19 @@ def test_value_counts_categorical_with_nan(self):
"ser, dropna, exp",
[
(
- pd.Series([False, True, True, pd.NA]),
+ Series([False, True, True, pd.NA]),
False,
- pd.Series([2, 1, 1], index=[True, False, pd.NA]),
+ Series([2, 1, 1], index=[True, False, pd.NA]),
),
(
- pd.Series([False, True, True, pd.NA]),
+ Series([False, True, True, pd.NA]),
True,
- pd.Series([2, 1], index=[True, False]),
+ Series([2, 1], index=[True, False]),
),
(
- pd.Series(range(3), index=[True, False, np.nan]).index,
+ Series(range(3), index=[True, False, np.nan]).index,
False,
- pd.Series([1, 1, 1], index=[True, False, pd.NA]),
+ Series([1, 1, 1], index=[True, False, pd.NA]),
),
],
)
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index d92edb6fe149a..92cd1e546b54d 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -179,7 +179,7 @@ def test_constructor_dict_timedelta_index(self):
tm.assert_series_equal(result, expected)
def test_sparse_accessor_updates_on_inplace(self):
- s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
+ s = Series([1, 1, 2, 3], dtype="Sparse[int]")
return_value = s.drop([0, 1], inplace=True)
assert return_value is None
assert s.sparse.density == 1.0
@@ -257,7 +257,7 @@ def get_dir(s):
)
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
- s = pd.Series(index=index, dtype=object)
+ s = Series(index=index, dtype=object)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
@@ -460,7 +460,7 @@ def f(x):
tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F"))
def test_str_accessor_updates_on_inplace(self):
- s = pd.Series(list("abc"))
+ s = Series(list("abc"))
return_value = s.drop([0], inplace=True)
assert return_value is None
assert len(s.str.lower()) == 2
@@ -479,11 +479,11 @@ def test_str_attribute(self):
s.str.repeat(2)
def test_empty_method(self):
- s_empty = pd.Series(dtype=object)
+ s_empty = Series(dtype=object)
assert s_empty.empty
- s2 = pd.Series(index=[1], dtype=object)
- for full_series in [pd.Series([1]), s2]:
+ s2 = Series(index=[1], dtype=object)
+ for full_series in [Series([1]), s2]:
assert not full_series.empty
@async_mark()
@@ -493,7 +493,7 @@ async def test_tab_complete_warning(self, ip):
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
- code = "import pandas as pd; s = pd.Series(dtype=object)"
+ code = "import pandas as pd; s = Series(dtype=object)"
await ip.run_code(code)
# TODO: remove it when Ipython updates
@@ -518,7 +518,7 @@ def test_integer_series_size(self):
assert s.size == 9
def test_attrs(self):
- s = pd.Series([0, 1], name="abc")
+ s = Series([0, 1], name="abc")
assert s.attrs == {}
s.attrs["version"] = 1
result = s + 1
@@ -526,7 +526,7 @@ def test_attrs(self):
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
def test_set_flags(self, allows_duplicate_labels):
- df = pd.Series([1, 2])
+ df = Series([1, 2])
result = df.set_flags(allows_duplicate_labels=allows_duplicate_labels)
if allows_duplicate_labels is None:
# We don't update when it's not provided
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 09181201beee4..00e6fb01da424 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -191,7 +191,7 @@ def test_add_with_duplicate_index(self):
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
- expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
+ expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
@@ -258,8 +258,8 @@ def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
- ser = pd.Series(10, index=dti)
- ser_utc = pd.Series(10, index=dti_utc)
+ ser = Series(10, index=dti)
+ ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
@@ -276,16 +276,16 @@ class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
- left = pd.Series(np.random.randn(10))
- right = pd.Series(np.random.randn(10))
+ left = Series(np.random.randn(10))
+ right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
- left = pd.Series(np.random.randn(10))
- right = pd.Series(np.random.randn(10))
+ left = Series(np.random.randn(10))
+ right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
@@ -306,7 +306,7 @@ def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
- expected = pd.Series(values, index=list("abcd"))
+ expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
@@ -324,7 +324,7 @@ def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
- expected = pd.Series(values, index=list("abcd"))
+ expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@@ -585,12 +585,12 @@ def test_ne(self):
"left, right",
[
(
- pd.Series([1, 2, 3], index=list("ABC"), name="x"),
- pd.Series([2, 2, 2], index=list("ABD"), name="x"),
+ Series([1, 2, 3], index=list("ABC"), name="x"),
+ Series([2, 2, 2], index=list("ABD"), name="x"),
),
(
- pd.Series([1, 2, 3], index=list("ABC"), name="x"),
- pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x"),
+ Series([1, 2, 3], index=list("ABC"), name="x"),
+ Series([2, 2, 2, 2], index=list("ABCD"), name="x"),
),
],
)
@@ -694,10 +694,10 @@ def test_series_add_aware_naive_raises(self):
def test_datetime_understood(self):
# Ensures it doesn't fail to create the right series
# reported in issue#16726
- series = pd.Series(pd.date_range("2012-01-01", periods=3))
+ series = Series(pd.date_range("2012-01-01", periods=3))
offset = pd.offsets.DateOffset(days=6)
result = series - offset
- expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
+ expected = Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
@@ -718,8 +718,8 @@ def test_series_ops_name_retention(flex, box, names, all_binary_operators):
if op is ops.rfloordiv and box in [list, tuple]:
pytest.xfail("op fails because of inconsistent ndarray-wrapping GH#28759")
- left = pd.Series(range(10), name=names[0])
- right = pd.Series(range(10), name=names[1])
+ left = Series(range(10), name=names[0])
+ right = Series(range(10), name=names[1])
right = box(right)
if flex:
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index a950ca78fc742..491b3a62b7d73 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -179,24 +179,24 @@ def test_constructor_nan(self, input_arg):
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
- result = pd.Series(dtype=dtype, index=index)
+ result = Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- result = pd.Series(index=["b", "a", "c"])
+ result = Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
- result = pd.Series(index=[1], dtype=str)
+ result = Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
- result = pd.Series(item, index=[1], dtype=str)
+ result = Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
@@ -313,7 +313,7 @@ def test_constructor_categorical(self):
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
- expected = pd.Series([1, 2, 3], dtype="int64")
+ expected = Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
@@ -379,14 +379,14 @@ def test_constructor_categorical_with_coercion(self):
assert result == expected
def test_constructor_categorical_dtype(self):
- result = pd.Series(
+ result = Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
- result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
+ result = Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
@@ -449,8 +449,8 @@ def test_categorical_sideeffects_free(self):
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
- left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
- right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
+ left = Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
+ right = Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
@@ -533,8 +533,8 @@ def test_constructor_maskedarray(self):
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
- result = pd.Series(data)
- expected = pd.Series([np.nan, np.nan, np.nan])
+ result = Series(data)
+ expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
@@ -601,7 +601,7 @@ def test_constructor_copy(self):
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
- y = pd.Series(x, copy=True, dtype=float)
+ y = Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
@@ -628,7 +628,7 @@ def test_constructor_copy(self):
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
- s = pd.Series(index)
+ s = Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
@@ -995,8 +995,8 @@ def test_construction_interval(self, interval_constructor):
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
- result = pd.Series(data_constructor(data))
- expected = pd.Series(IntervalArray(data))
+ result = Series(data_constructor(data))
+ expected = Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@@ -1030,14 +1030,14 @@ def test_construction_consistency(self):
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
- result = pd.Series(data_constructor(data))
- expected = pd.Series(period_array(data))
+ result = Series(data_constructor(data))
+ expected = Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
- result = pd.Series(data)
+ result = Series(data)
assert result.dtype == object
assert result.tolist() == data
@@ -1473,7 +1473,7 @@ def test_constructor_datetime64(self):
def test_constructor_datetimelike_scalar_to_string_dtype(self):
# https://github.com/pandas-dev/pandas/pull/33846
result = Series("M", index=[1, 2, 3], dtype="string")
- expected = pd.Series(["M", "M", "M"], index=[1, 2, 3], dtype="string")
+ expected = Series(["M", "M", "M"], index=[1, 2, 3], dtype="string")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
@@ -1486,9 +1486,9 @@ def test_constructor_datetimelike_scalar_to_string_dtype(self):
def test_constructor_sparse_datetime64(self, values):
# https://github.com/pandas-dev/pandas/issues/35762
dtype = pd.SparseDtype("datetime64[ns]")
- result = pd.Series(values, dtype=dtype)
+ result = Series(values, dtype=dtype)
arr = pd.arrays.SparseArray(values, dtype=dtype)
- expected = pd.Series(arr)
+ expected = Series(arr)
tm.assert_series_equal(result, expected)
def test_construction_from_ordered_collection(self):
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index b0926089bd7b4..53b465fa814b3 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -192,7 +192,7 @@ def compare(s, name):
exp = Series(np.array([0, 1, 2], dtype="int64"), index=index, name="xxx")
tm.assert_series_equal(s.dt.second, exp)
- exp = pd.Series([s[0]] * 3, index=index, name="xxx")
+ exp = Series([s[0]] * 3, index=index, name="xxx")
tm.assert_series_equal(s.dt.normalize(), exp)
# periodindex
@@ -350,7 +350,7 @@ def test_dt_namespace_accessor_categorical(self):
def test_dt_tz_localize_categorical(self, tz_aware_fixture):
# GH 27952
tz = tz_aware_fixture
- datetimes = pd.Series(
+ datetimes = Series(
["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns]"
)
categorical = datetimes.astype("category")
@@ -361,7 +361,7 @@ def test_dt_tz_localize_categorical(self, tz_aware_fixture):
def test_dt_tz_convert_categorical(self, tz_aware_fixture):
# GH 27952
tz = tz_aware_fixture
- datetimes = pd.Series(
+ datetimes = Series(
["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns, MET]"
)
categorical = datetimes.astype("category")
@@ -372,7 +372,7 @@ def test_dt_tz_convert_categorical(self, tz_aware_fixture):
@pytest.mark.parametrize("accessor", ["year", "month", "day"])
def test_dt_other_accessors_categorical(self, accessor):
# GH 27952
- datetimes = pd.Series(
+ datetimes = Series(
["2018-01-01", "2018-01-01", "2019-01-02"], dtype="datetime64[ns]"
)
categorical = datetimes.astype("category")
@@ -657,16 +657,16 @@ def test_dt_timetz_accessor(self, tz_naive_fixture):
def test_setitem_with_string_index(self):
# GH 23451
- x = pd.Series([1, 2, 3], index=["Date", "b", "other"])
+ x = Series([1, 2, 3], index=["Date", "b", "other"])
x["Date"] = date.today()
assert x.Date == date.today()
assert x["Date"] == date.today()
def test_setitem_with_different_tz(self):
# GH#24024
- ser = pd.Series(pd.date_range("2000", periods=2, tz="US/Central"))
+ ser = Series(pd.date_range("2000", periods=2, tz="US/Central"))
ser[0] = pd.Timestamp("2000", tz="US/Eastern")
- expected = pd.Series(
+ expected = Series(
[
pd.Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"),
pd.Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"),
@@ -688,7 +688,7 @@ def test_setitem_with_different_tz(self):
],
)
def test_isocalendar(self, input_series, expected_output):
- result = pd.to_datetime(pd.Series(input_series)).dt.isocalendar()
+ result = pd.to_datetime(Series(input_series)).dt.isocalendar()
expected_frame = pd.DataFrame(
expected_output, columns=["year", "week", "day"], dtype="UInt32"
)
@@ -697,7 +697,7 @@ def test_isocalendar(self, input_series, expected_output):
def test_week_and_weekofyear_are_deprecated():
# GH#33595 Deprecate week and weekofyear
- series = pd.to_datetime(pd.Series(["2020-01-01"]))
+ series = pd.to_datetime(Series(["2020-01-01"]))
with tm.assert_produces_warning(FutureWarning):
series.dt.week
with tm.assert_produces_warning(FutureWarning):
@@ -706,7 +706,7 @@ def test_week_and_weekofyear_are_deprecated():
def test_normalize_pre_epoch_dates():
# GH: 36294
- s = pd.to_datetime(pd.Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
+ s = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
result = s.dt.normalize()
- expected = pd.to_datetime(pd.Series(["1969-01-01", "2016-01-01"]))
+ expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"]))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index ae89e16ca7667..29c1728be786a 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -482,7 +482,7 @@ def test_infer_objects_series(self):
)
def test_values_compatibility(self, data):
# https://github.com/pandas-dev/pandas/issues/23995
- result = pd.Series(data).values
+ result = Series(data).values
expected = np.array(data.astype(object))
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index 51410fce7efae..488bd120ac405 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -204,31 +204,31 @@ def test_convert_preserve_all_bool(self):
tm.assert_series_equal(r, e)
def test_constructor_no_pandas_array(self):
- ser = pd.Series([1, 2, 3])
- result = pd.Series(ser.array)
+ ser = Series([1, 2, 3])
+ result = Series(ser.array)
tm.assert_series_equal(ser, result)
assert isinstance(result._mgr.blocks[0], IntBlock)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
- ser = pd.Series([1, 2], dtype="int64")
+ ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
def test_from_array(self):
- result = pd.Series(pd.array(["1H", "2H"], dtype="timedelta64[ns]"))
+ result = Series(pd.array(["1H", "2H"], dtype="timedelta64[ns]"))
assert result._mgr.blocks[0].is_extension is False
- result = pd.Series(pd.array(["2015"], dtype="datetime64[ns]"))
+ result = Series(pd.array(["2015"], dtype="datetime64[ns]"))
assert result._mgr.blocks[0].is_extension is False
def test_from_list_dtype(self):
- result = pd.Series(["1H", "2H"], dtype="timedelta64[ns]")
+ result = Series(["1H", "2H"], dtype="timedelta64[ns]")
assert result._mgr.blocks[0].is_extension is False
- result = pd.Series(["2015"], dtype="datetime64[ns]")
+ result = Series(["2015"], dtype="datetime64[ns]")
assert result._mgr.blocks[0].is_extension is False
diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py
index 217e5ae9ee32e..df7ea46dc4f86 100644
--- a/pandas/tests/series/test_logical_ops.py
+++ b/pandas/tests/series/test_logical_ops.py
@@ -153,37 +153,37 @@ def test_logical_operators_bool_dtype_with_int(self):
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
- left = pd.Series([True, True, True, False, True])
+ left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
- expected = pd.Series([True, False, False, False, False])
+ expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & pd.Index(right)
tm.assert_series_equal(result, expected)
- result = left & pd.Series(right)
+ result = left & Series(right)
tm.assert_series_equal(result, expected)
- expected = pd.Series([True, True, True, True, True])
+ expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | pd.Index(right)
tm.assert_series_equal(result, expected)
- result = left | pd.Series(right)
+ result = left | Series(right)
tm.assert_series_equal(result, expected)
- expected = pd.Series([False, True, True, True, True])
+ expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ pd.Index(right)
tm.assert_series_equal(result, expected)
- result = left ^ pd.Series(right)
+ result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
@@ -304,11 +304,11 @@ def test_reversed_logical_op_with_index_returns_series(self, op):
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
- expected = pd.Series(op(idx1.values, ser.values))
+ expected = Series(op(idx1.values, ser.values))
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
- expected = pd.Series(op(idx2.values, ser.values))
+ expected = Series(op(idx2.values, ser.values))
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@@ -431,18 +431,18 @@ def test_logical_ops_label_based(self):
def test_logical_ops_df_compat(self):
# GH#1134
- s1 = pd.Series([True, False, True], index=list("ABC"), name="x")
- s2 = pd.Series([True, True, False], index=list("ABD"), name="x")
+ s1 = Series([True, False, True], index=list("ABC"), name="x")
+ s2 = Series([True, True, False], index=list("ABD"), name="x")
- exp = pd.Series([True, False, False, False], index=list("ABCD"), name="x")
+ exp = Series([True, False, False, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
- exp_or1 = pd.Series([True, True, True, False], index=list("ABCD"), name="x")
+ exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 | s2, exp_or1)
# np.nan | True => np.nan, filled with False
- exp_or = pd.Series([True, True, False, False], index=list("ABCD"), name="x")
+ exp_or = Series([True, True, False, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s2 | s1, exp_or)
# DataFrame doesn't fill nan with False
@@ -454,18 +454,18 @@ def test_logical_ops_df_compat(self):
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp_or.to_frame())
# different length
- s3 = pd.Series([True, False, True], index=list("ABC"), name="x")
- s4 = pd.Series([True, True, True, True], index=list("ABCD"), name="x")
+ s3 = Series([True, False, True], index=list("ABC"), name="x")
+ s4 = Series([True, True, True, True], index=list("ABCD"), name="x")
- exp = pd.Series([True, False, True, False], index=list("ABCD"), name="x")
+ exp = Series([True, False, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
- exp_or1 = pd.Series([True, True, True, False], index=list("ABCD"), name="x")
+ exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s3 | s4, exp_or1)
# True | np.nan => True
- exp_or = pd.Series([True, True, True, True], index=list("ABCD"), name="x")
+ exp_or = Series([True, True, True, True], index=list("ABCD"), name="x")
tm.assert_series_equal(s4 | s3, exp_or)
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp.to_frame())
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 0144e4257efe0..712921f70e46f 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -172,7 +172,7 @@ def test_datetime64_tz_fillna(self, tz):
pd.NaT,
]
)
- null_loc = pd.Series([False, True, False, True])
+ null_loc = Series([False, True, False, True])
result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
expected = Series(
@@ -247,7 +247,7 @@ def test_datetime64_tz_fillna(self, tz):
idx = pd.DatetimeIndex(
["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz
)
- s = pd.Series(idx)
+ s = Series(idx)
assert s.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(pd.isna(s), null_loc)
@@ -366,8 +366,8 @@ def test_datetime64_tz_fillna(self, tz):
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH 15855
- ser = pd.Series([pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT])
- exp = pd.Series(
+ ser = Series([pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT])
+ exp = Series(
[
pd.Timestamp("2012-11-11 00:00:00+01:00"),
pd.Timestamp("2012-11-11 00:00:00+01:00"),
@@ -375,8 +375,8 @@ def test_fillna_dt64tz_with_method(self):
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
- ser = pd.Series([pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")])
- exp = pd.Series(
+ ser = Series([pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")])
+ exp = Series(
[
pd.Timestamp("2012-11-11 00:00:00+01:00"),
pd.Timestamp("2012-11-11 00:00:00+01:00"),
@@ -421,13 +421,13 @@ def test_fillna_consistency(self):
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
- data = pd.Series(
+ data = Series(
[pd.NaT, pd.NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)]
)
filled = data.fillna(method="bfill")
- expected = pd.Series(
+ expected = Series(
[
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
@@ -440,15 +440,15 @@ def test_datetime64tz_fillna_round_issue(self):
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
- s = pd.Series([1.0, np.nan])
+ s = Series([1.0, np.nan])
result = s.fillna(0, downcast="infer")
- expected = pd.Series([1, 0])
+ expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
- s = pd.Series([1.0, np.nan])
+ s = Series([1.0, np.nan])
result = s.fillna({1: 0}, downcast="infer")
- expected = pd.Series([1, 0])
+ expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_fillna_int(self):
@@ -627,7 +627,7 @@ def test_ffill(self):
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
- series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
+ series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
tm.assert_series_equal(series, result)
@@ -710,7 +710,7 @@ def test_datetime64_tz_dropna(self):
idx = pd.DatetimeIndex(
["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz="Asia/Tokyo"
)
- s = pd.Series(idx)
+ s = Series(idx)
assert s.dtype == "datetime64[ns, Asia/Tokyo]"
result = s.dropna()
expected = Series(
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index b54c09e5750fd..52a398a00dfe5 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -15,7 +15,7 @@ def test_auto_conversion(self):
series = Series(list(period_range("2000-01-01", periods=10, freq="D")))
assert series.dtype == "Period[D]"
- series = pd.Series(
+ series = Series(
[pd.Period("2011-01-01", freq="D"), pd.Period("2011-02-01", freq="D")]
)
assert series.dtype == "Period[D]"
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index bab3853e3bd1d..c4cd12fcbdf3b 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -99,9 +99,9 @@ def test_asfreq_resample_set_correct_freq(self):
def test_view_tz(self):
# GH#24024
- ser = pd.Series(pd.date_range("2000", periods=4, tz="US/Central"))
+ ser = Series(pd.date_range("2000", periods=4, tz="US/Central"))
result = ser.view("i8")
- expected = pd.Series(
+ expected = Series(
[
946706400000000000,
946792800000000000,
@@ -113,7 +113,7 @@ def test_view_tz(self):
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_asarray_object_dt64(self, tz):
- ser = pd.Series(pd.date_range("2000", periods=2, tz=tz))
+ ser = Series(pd.date_range("2000", periods=2, tz=tz))
with tm.assert_produces_warning(None):
# Future behavior (for tzaware case) with no warning
@@ -126,7 +126,7 @@ def test_asarray_object_dt64(self, tz):
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
- ser = pd.Series(pd.date_range("2000", periods=2))
+ ser = Series(pd.date_range("2000", periods=2))
expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
result = np.asarray(ser)
@@ -134,7 +134,7 @@ def test_asarray_tz_naive(self):
def test_asarray_tz_aware(self):
tz = "US/Central"
- ser = pd.Series(pd.date_range("2000", periods=2, tz=tz))
+ ser = Series(pd.date_range("2000", periods=2, tz=tz))
expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]")
result = np.asarray(ser, dtype="datetime64[ns]")
diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py
index c7fc37a278e83..bcd6a7a7308a3 100644
--- a/pandas/tests/series/test_ufunc.py
+++ b/pandas/tests/series/test_ufunc.py
@@ -30,7 +30,7 @@ def arrays_for_binary_ufunc():
@pytest.mark.parametrize("ufunc", UNARY_UFUNCS)
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
def test_unary_ufunc(ufunc, sparse):
- # Test that ufunc(Series) == Series(ufunc)
+ # Test that ufunc(pd.Series) == pd.Series(ufunc)
array = np.random.randint(0, 10, 10, dtype="int64")
array[::2] = 0
if sparse:
@@ -49,13 +49,13 @@ def test_unary_ufunc(ufunc, sparse):
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
- # Test that ufunc(Series(a), array) == Series(ufunc(a, b))
+ # Test that ufunc(pd.Series(a), array) == pd.Series(ufunc(a, b))
a1, a2 = arrays_for_binary_ufunc
if sparse:
a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))
- name = "name" # op(Series, array) preserves the name.
+ name = "name" # op(pd.Series, array) preserves the name.
series = pd.Series(a1, name=name)
other = a2
@@ -76,14 +76,14 @@ def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc):
# Test that
- # * func(Series(a), Series(b)) == Series(ufunc(a, b))
- # * ufunc(Index, Series) dispatches to Series (returns a Series)
+ # * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b))
+ # * ufunc(Index, pd.Series) dispatches to pd.Series (returns a pd.Series)
a1, a2 = arrays_for_binary_ufunc
if sparse:
a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))
- name = "name" # op(Series, array) preserves the name.
+ name = "name" # op(pd.Series, array) preserves the name.
series = pd.Series(a1, name=name)
other = pd.Index(a2, name=name).astype("int64")
@@ -107,14 +107,14 @@ def test_binary_ufunc_with_series(
flip, shuffle, sparse, ufunc, arrays_for_binary_ufunc
):
# Test that
- # * func(Series(a), Series(b)) == Series(ufunc(a, b))
+ # * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b))
# with alignment between the indices
a1, a2 = arrays_for_binary_ufunc
if sparse:
a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))
- name = "name" # op(Series, array) preserves the name.
+ name = "name" # op(pd.Series, array) preserves the name.
series = pd.Series(a1, name=name)
other = pd.Series(a2, name=name)
@@ -146,8 +146,8 @@ def test_binary_ufunc_with_series(
@pytest.mark.parametrize("flip", [True, False])
def test_binary_ufunc_scalar(ufunc, sparse, flip, arrays_for_binary_ufunc):
# Test that
- # * ufunc(Series, scalar) == Series(ufunc(array, scalar))
- # * ufunc(Series, scalar) == ufunc(scalar, Series)
+ # * ufunc(pd.Series, scalar) == pd.Series(ufunc(array, scalar))
+ # * ufunc(pd.Series, scalar) == ufunc(scalar, pd.Series)
array, _ = arrays_for_binary_ufunc
if sparse:
array = SparseArray(array)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 37d92e220e4cd..3a1279c481a1d 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -851,10 +851,10 @@ def test_same_nan_is_in_large(self):
def test_same_nan_is_in_large_series(self):
# https://github.com/pandas-dev/pandas/issues/22205
s = np.tile(1.0, 1_000_001)
- series = pd.Series(s)
+ series = Series(s)
s[0] = np.nan
result = series.isin([np.nan, 1])
- expected = pd.Series(np.ones(len(s), dtype=bool))
+ expected = Series(np.ones(len(s), dtype=bool))
tm.assert_series_equal(result, expected)
def test_same_object_is_in(self):
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index c45e4508c6153..398457f81a266 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1036,7 +1036,7 @@ def test_use_bottleneck():
)
def test_numpy_ops(numpy_op, expected):
# GH8383
- result = numpy_op(pd.Series([1, 2, 3, 4]))
+ result = numpy_op(Series([1, 2, 3, 4]))
assert result == expected
@@ -1062,7 +1062,7 @@ def test_numpy_ops(numpy_op, expected):
)
def test_nanops_independent_of_mask_param(operation):
# GH22764
- s = pd.Series([1, 2, np.nan, 3, np.nan, 4])
+ s = Series([1, 2, np.nan, 3, np.nan, 4])
mask = s.isna()
median_expected = operation(s)
median_result = operation(s, mask=mask)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 61df5d4d5fdd6..9be5abb9dda65 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -132,7 +132,7 @@ def any_string_method(request):
Examples
--------
>>> def test_something(any_string_method):
- ... s = pd.Series(['a', 'b', np.nan, 'd'])
+ ... s = Series(['a', 'b', np.nan, 'd'])
...
... method_name, args, kwargs = any_string_method
... method = getattr(s.str, method_name)
@@ -183,7 +183,7 @@ def any_allowed_skipna_inferred_dtype(request):
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
...
... # constructor for .str-accessor will also pass
- ... pd.Series(values).str
+ ... Series(values).str
"""
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
@@ -2546,8 +2546,8 @@ def test_split(self):
@pytest.mark.parametrize("dtype", [object, "string"])
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(self, dtype, method):
- s = pd.Series(["a b", pd.NA, "b c"], dtype=dtype)
- expected = pd.Series([["a", "b"], pd.NA, ["b", "c"]])
+ s = Series(["a b", pd.NA, "b c"], dtype=dtype)
+ expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
@@ -3653,14 +3653,14 @@ def test_string_array_extract():
@pytest.mark.parametrize("klass", [tuple, list, np.array, pd.Series, pd.Index])
def test_cat_different_classes(klass):
# https://github.com/pandas-dev/pandas/issues/33425
- s = pd.Series(["a", "b", "c"])
+ s = Series(["a", "b", "c"])
result = s.str.cat(klass(["x", "y", "z"]))
- expected = pd.Series(["ax", "by", "cz"])
+ expected = Series(["ax", "by", "cz"])
tm.assert_series_equal(result, expected)
def test_str_get_stringarray_multiple_nans():
- s = pd.Series(pd.array(["a", "ab", pd.NA, "abc"]))
+ s = Series(pd.array(["a", "ab", pd.NA, "abc"]))
result = s.str.get(2)
- expected = pd.Series(pd.array([pd.NA, pd.NA, pd.NA, "c"]))
+ expected = Series(pd.array([pd.NA, pd.NA, pd.NA, "c"]))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 63f9a2532fa73..a070d45089f96 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -714,17 +714,17 @@ def test_to_datetime_utc_true(
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
- result = pd.to_datetime(pd.Series([ts]), utc=True, cache=cache)
- expected = pd.Series([pd.Timestamp(ts, tz="utc")])
+ result = pd.to_datetime(Series([ts]), utc=True, cache=cache)
+ expected = Series([pd.Timestamp(ts, tz="utc")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = "2013-01-01 00:00:00-01:00"
expected_ts = "2013-01-01 01:00:00"
- data = pd.Series([ts] * 3)
+ data = Series([ts] * 3)
result = pd.to_datetime(data, utc=True, cache=cache)
- expected = pd.Series([pd.Timestamp(expected_ts, tz="utc")] * 3)
+ expected = Series([pd.Timestamp(expected_ts, tz="utc")] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@@ -736,8 +736,8 @@ def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
- expected = pd.Series([pd.Timestamp("2013-01-01 01:00:00", tz="UTC")])
- result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True, cache=cache)
+ expected = Series([pd.Timestamp("2013-01-01 01:00:00", tz="UTC")])
+ result = pd.to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@@ -929,7 +929,7 @@ def test_to_datetime_from_deque(self):
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
- data = pd.Series(test_dates)
+ data = Series(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
@@ -1049,7 +1049,7 @@ def test_iso8601_strings_mixed_offsets_with_naive(self):
def test_mixed_offsets_with_native_datetime_raises(self):
# GH 25978
- s = pd.Series(
+ s = Series(
[
"nan",
pd.Timestamp("1990-01-01"),
@@ -1420,7 +1420,7 @@ def test_dataframe_utc_true(self):
# GH 23760
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = pd.to_datetime(df, utc=True)
- expected = pd.Series(
+ expected = Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
@@ -1579,7 +1579,7 @@ def test_to_datetime_with_apply(self, cache):
result = td.apply(pd.to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
- td = pd.Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
+ td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(td, format="%b %y", errors="raise", cache=cache)
@@ -1818,7 +1818,7 @@ def test_guess_datetime_format_for_array(self):
class TestToDatetimeInferFormat:
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
- s = pd.Series(pd.date_range("20000101", periods=50, freq="H"))
+ s = Series(pd.date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
@@ -1842,7 +1842,7 @@ def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
- s = pd.Series(
+ s = Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
@@ -1855,7 +1855,7 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
- s = pd.Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
+ s = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
@@ -1864,7 +1864,7 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
- s = pd.Series(
+ s = Series(
np.array(["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan])
)
tm.assert_series_equal(
@@ -1874,7 +1874,7 @@ def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
- s = pd.Series(
+ s = Series(
np.array(
[
np.nan,
@@ -1896,9 +1896,9 @@ def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
- s = pd.Series([f"2019-02-02 08:07:13 {tz_name}"])
+ s = Series([f"2019-02-02 08:07:13 {tz_name}"])
result = to_datetime(s, infer_datetime_format=True)
- expected = pd.Series(
+ expected = Series(
[pd.Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
tm.assert_series_equal(result, expected)
@@ -1906,8 +1906,8 @@ def test_infer_datetime_format_tz_name(self, tz_name, offset):
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
- s = pd.Series(["2014-1-1", "2014-2-2", "2015-3-3"])
- expected = pd.Series(
+ s = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
+ expected = Series(
[
pd.Timestamp("2014-01-01"),
pd.Timestamp("2014-02-02"),
@@ -2410,13 +2410,13 @@ def test_should_cache_errors(unique_share, check_count, err_message):
def test_nullable_integer_to_datetime():
# Test for #30050
- ser = pd.Series([1, 2, None, 2 ** 61, None])
+ ser = Series([1, 2, None, 2 ** 61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = pd.to_datetime(ser, unit="ns")
- expected = pd.Series(
+ expected = Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index b22f249de2826..713607d087bc0 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -571,8 +571,8 @@ def test_downcast_limits(dtype, downcast, min_max):
"ser,expected",
[
(
- pd.Series([0, 9223372036854775808]),
- pd.Series([0, 9223372036854775808], dtype=np.uint64),
+ Series([0, 9223372036854775808]),
+ Series([0, 9223372036854775808], dtype=np.uint64),
)
],
)
@@ -647,7 +647,7 @@ def test_failure_to_convert_uint64_string_to_NaN():
assert np.isnan(result)
ser = Series([32, 64, np.nan])
- result = to_numeric(pd.Series(["32", "64", "uint64"]), errors="coerce")
+ result = to_numeric(Series(["32", "64", "uint64"]), errors="coerce")
tm.assert_series_equal(result, ser)
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 53746aa048663..0f56fb0b93642 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -301,14 +301,14 @@ def test_series_equal_exact_for_nonnumeric():
@pytest.mark.parametrize("right_dtype", ["Int32", "int64"])
def test_assert_series_equal_ignore_extension_dtype_mismatch(right_dtype):
# https://github.com/pandas-dev/pandas/issues/35715
- left = pd.Series([1, 2, 3], dtype="Int64")
- right = pd.Series([1, 2, 3], dtype=right_dtype)
+ left = Series([1, 2, 3], dtype="Int64")
+ right = Series([1, 2, 3], dtype=right_dtype)
tm.assert_series_equal(left, right, check_dtype=False)
def test_allows_duplicate_labels():
- left = pd.Series([1])
- right = pd.Series([1]).set_flags(allows_duplicate_labels=False)
+ left = Series([1])
+ right = Series([1]).set_flags(allows_duplicate_labels=False)
tm.assert_series_equal(left, left)
tm.assert_series_equal(right, right)
tm.assert_series_equal(left, right, check_flags=False)
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index ff29df39e1871..f761b6b4ffd7a 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -302,12 +302,12 @@ def test_hash_with_tuple():
df = pd.DataFrame({"data": [tuple("1"), tuple("2")]})
result = hash_pandas_object(df)
- expected = pd.Series([10345501319357378243, 8331063931016360761], dtype=np.uint64)
+ expected = Series([10345501319357378243, 8331063931016360761], dtype=np.uint64)
tm.assert_series_equal(result, expected)
df2 = pd.DataFrame({"data": [tuple([1]), tuple([2])]})
result = hash_pandas_object(df2)
- expected = pd.Series([9408946347443669104, 3278256261030523334], dtype=np.uint64)
+ expected = Series([9408946347443669104, 3278256261030523334], dtype=np.uint64)
tm.assert_series_equal(result, expected)
# require that the elements of such tuples are themselves hashable
@@ -319,6 +319,6 @@ def test_hash_with_tuple():
def test_hash_object_none_key():
# https://github.com/pandas-dev/pandas/issues/30887
- result = pd.util.hash_pandas_object(pd.Series(["a", "b"]), hash_key=None)
- expected = pd.Series([4578374827886788867, 17338122309987883691], dtype="uint64")
+ result = pd.util.hash_pandas_object(Series(["a", "b"]), hash_key=None)
+ expected = Series([4578374827886788867, 17338122309987883691], dtype="uint64")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index 96ad83f6b40b1..6ab53c8e2ec0d 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -143,8 +143,8 @@ def test_rolling_apply_consistency(
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
- s = pd.Series(np.zeros(20))
- other = pd.Series(np.arange(20))
+ s = Series(np.zeros(20))
+ other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py
index 287cd7ebba536..605b85344ba76 100644
--- a/pandas/tests/window/moments/test_moments_ewm.py
+++ b/pandas/tests/window/moments/test_moments_ewm.py
@@ -2,7 +2,6 @@
from numpy.random import randn
import pytest
-import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
@@ -20,7 +19,7 @@ def test_ewma_frame(frame, name):
def test_ewma_adjust():
- vals = pd.Series(np.zeros(1000))
+ vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@@ -295,7 +294,7 @@ def test_ewm_domain_checks(arr):
@pytest.mark.parametrize("method", ["mean", "vol", "var"])
def test_ew_empty_series(method):
- vals = pd.Series([], dtype=np.float64)
+ vals = Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 488306d0585c5..2f622c2bc3e60 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -74,17 +74,17 @@ def test_cmov_window():
def test_cmov_window_corner():
# GH 8238
# all nan
- vals = pd.Series([np.nan] * 10)
+ vals = Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
- vals = pd.Series([], dtype=object)
+ vals = Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
- vals = pd.Series(np.random.randn(5))
+ vals = Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@@ -523,22 +523,22 @@ def test_cmov_window_special_linear_range(win_types_special):
def test_rolling_min_min_periods():
- a = pd.Series([1, 2, 3, 4, 5])
+ a = Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
- expected = pd.Series(np.ones(len(a)))
+ expected = Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
- pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
+ Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max_min_periods():
- a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
+ a = Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
- pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
+ Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
def test_rolling_quantile_np_percentile():
@@ -610,17 +610,17 @@ def test_rolling_quantile_param():
def test_rolling_std_1obs():
- vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
+ vals = Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
- expected = pd.Series([np.nan] * 5)
+ expected = Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
- expected = pd.Series([0.0] * 5)
+ expected = Series([0.0] * 5)
tm.assert_series_equal(result, expected)
- result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
+ result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
@@ -629,7 +629,7 @@ def test_rolling_std_neg_sqrt():
# Test move_nanstd for neg sqrt.
- a = pd.Series(
+ a = Series(
[
0.0011448196318903589,
0.00028718669878572767,
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 5b25577602216..fbdf8c775530a 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -550,7 +550,7 @@ def test_groupby_rolling_count_closed_on(self):
.rolling("3d", on="date", closed="left")["column1"]
.count()
)
- expected = pd.Series(
+ expected = Series(
[np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],
name="column1",
index=pd.MultiIndex.from_tuples(
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index f1d54d91c6d22..312b30e4491a6 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -136,22 +136,20 @@ def test_closed():
def test_closed_empty(closed, arithmetic_win_operators):
# GH 26005
func_name = arithmetic_win_operators
- ser = pd.Series(
- data=np.arange(5), index=pd.date_range("2000", periods=5, freq="2D")
- )
+ ser = Series(data=np.arange(5), index=pd.date_range("2000", periods=5, freq="2D"))
roll = ser.rolling("1D", closed=closed)
result = getattr(roll, func_name)()
- expected = pd.Series([np.nan] * 5, index=ser.index)
+ expected = Series([np.nan] * 5, index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_closed_one_entry(func):
# GH24718
- ser = pd.Series(data=[2], index=pd.date_range("2000", periods=1))
+ ser = Series(data=[2], index=pd.date_range("2000", periods=1))
result = getattr(ser.rolling("10D", closed="left"), func)()
- tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index))
+ tm.assert_series_equal(result, Series([np.nan], index=ser.index))
@pytest.mark.parametrize("func", ["min", "max"])
@@ -166,7 +164,7 @@ def test_closed_one_entry_groupby(func):
exp_idx = pd.MultiIndex.from_arrays(
arrays=[[1, 1, 2], ser.index], names=("A", None)
)
- expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B")
+ expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B")
tm.assert_series_equal(result, expected)
@@ -186,23 +184,23 @@ def test_closed_one_entry_groupby(func):
)
def test_closed_min_max_datetime(input_dtype, func, closed, expected):
# see gh-21704
- ser = pd.Series(
+ ser = Series(
data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10)
)
result = getattr(ser.rolling("3D", closed=closed), func)()
- expected = pd.Series(expected, index=ser.index)
+ expected = Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
def test_closed_uneven():
# see gh-21704
- ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
+ ser = Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
# uneven
ser = ser.drop(index=ser.index[[1, 5]])
result = ser.rolling("3D", closed="left").min()
- expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)
+ expected = Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)
tm.assert_series_equal(result, expected)
@@ -221,10 +219,10 @@ def test_closed_uneven():
)
def test_closed_min_max_minp(func, closed, expected):
# see gh-21704
- ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
+ ser = Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
ser[ser.index[-3:]] = np.nan
result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)()
- expected = pd.Series(expected, index=ser.index)
+ expected = Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
@@ -239,9 +237,9 @@ def test_closed_min_max_minp(func, closed, expected):
)
def test_closed_median_quantile(closed, expected):
# GH 26005
- ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
+ ser = Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
roll = ser.rolling("3D", closed=closed)
- expected = pd.Series(expected, index=ser.index)
+ expected = Series(expected, index=ser.index)
result = roll.median()
tm.assert_series_equal(result, expected)
@@ -267,8 +265,8 @@ def tests_empty_df_rolling(roller):
def test_empty_window_median_quantile():
# GH 26005
- expected = pd.Series([np.nan, np.nan, np.nan])
- roll = pd.Series(np.arange(3)).rolling(0)
+ expected = Series([np.nan, np.nan, np.nan])
+ roll = Series(np.arange(3)).rolling(0)
result = roll.median()
tm.assert_series_equal(result, expected)
@@ -280,27 +278,27 @@ def test_empty_window_median_quantile():
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
- x = pd.Series([np.nan])
+ x = Series([np.nan])
result = x.rolling(1, min_periods=0).sum()
- expected = pd.Series([0.0])
+ expected = Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.rolling(1, min_periods=1).sum()
- expected = pd.Series([np.nan])
+ expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_missing_minp_zero_variable():
# https://github.com/pandas-dev/pandas/pull/18921
- x = pd.Series(
+ x = Series(
[np.nan] * 4,
index=pd.DatetimeIndex(
["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"]
),
)
result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum()
- expected = pd.Series(0.0, index=x.index)
+ expected = Series(0.0, index=x.index)
tm.assert_series_equal(result, expected)
@@ -349,8 +347,8 @@ def test_readonly_array():
# GH-27766
arr = np.array([1, 3, np.nan, 3, 5])
arr.setflags(write=False)
- result = pd.Series(arr).rolling(2).mean()
- expected = pd.Series([np.nan, 2, np.nan, np.nan, 4])
+ result = Series(arr).rolling(2).mean()
+ expected = Series([np.nan, 2, np.nan, np.nan, 4])
tm.assert_series_equal(result, expected)
@@ -442,7 +440,7 @@ def test_min_periods1():
# GH#6795
df = pd.DataFrame([0, 1, 2, 1, 0], columns=["a"])
result = df["a"].rolling(3, center=True, min_periods=1).max()
- expected = pd.Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a")
+ expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a")
tm.assert_series_equal(result, expected)
@@ -741,7 +739,7 @@ def test_rolling_numerical_accuracy_kahan_sum():
# GH: 13254
df = pd.DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"])
result = df["x"].rolling(3).sum()
- expected = pd.Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x")
+ expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x")
tm.assert_series_equal(result, expected)
@@ -770,10 +768,10 @@ def test_rolling_numerical_accuracy_small_values():
def test_rolling_numerical_too_large_numbers():
# GH: 11645
dates = pd.date_range("2015-01-01", periods=10, freq="D")
- ds = pd.Series(data=range(10), index=dates, dtype=np.float64)
+ ds = Series(data=range(10), index=dates, dtype=np.float64)
ds[2] = -9e33
result = ds.rolling(5).mean()
- expected = pd.Series(
+ expected = Series(
[np.nan, np.nan, np.nan, np.nan, -1.8e33, -1.8e33, -1.8e33, 5.0, 6.0, 7.0],
index=dates,
)
@@ -864,9 +862,9 @@ def test_rolling_on_df_transposed():
)
def test_rolling_period_index(index, window, func, values):
# GH: 34225
- ds = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)
+ ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)
result = getattr(ds.rolling(window, closed="left"), func)()
- expected = pd.Series(values, index=index)
+ expected = Series(values, index=index)
tm.assert_series_equal(result, expected)
@@ -876,8 +874,8 @@ def test_rolling_sem(constructor):
obj = getattr(pd, constructor)([0, 1, 2])
result = obj.rolling(2, min_periods=1).sem()
if isinstance(result, DataFrame):
- result = pd.Series(result[0].values)
- expected = pd.Series([np.nan] + [0.707107] * 2)
+ result = Series(result[0].values)
+ expected = Series([np.nan] + [0.707107] * 2)
tm.assert_series_equal(result, expected)
@@ -892,7 +890,7 @@ def test_rolling_sem(constructor):
)
def test_rolling_var_numerical_issues(func, third_value, values):
# GH: 37051
- ds = pd.Series([99999999999999999, 1, third_value, 2, 3, 1, 1])
+ ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1])
result = getattr(ds.rolling(2), func)()
- expected = pd.Series([np.nan] + values)
+ expected = Series([np.nan] + values)
tm.assert_series_equal(result, expected)
| Adding a CI check that we aren't (for instance) using Series(...) and pd.Series(...) in the same file. ~This is kept intentionally small in scope (checking only DataFrame and Series for one file name right now) since this is very common in the code base and I'm not sure if this is something we'd actually want to enforce.~ | https://api.github.com/repos/pandas-dev/pandas/pulls/37188 | 2020-10-17T03:51:42Z | 2020-10-21T00:52:16Z | 2020-10-21T00:52:15Z | 2020-10-21T00:54:43Z |
call __finalize__ in more methods | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 83a9edfb239e2..b4d1787697973 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -524,7 +524,7 @@ Other
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`)
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`)
- Fixed bug in metadata propagation incorrectly copying DataFrame columns as metadata when the column name overlaps with the metadata name (:issue:`37037`)
-- Fixed metadata propagation in the :class:`Series.dt` and :class:`Series.str` accessors (:issue:`28283`)
+- Fixed metadata propagation in the :class:`Series.dt` and :class:`Series.str` accessors and :class:`DataFrame.duplicated` and ::class:`DataFrame.stack` methods (:issue:`28283`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError``, from a bare ``Exception`` previously (:issue:`35744`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 801307a8f9481..ee8a83fd6c091 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5286,7 +5286,8 @@ def f(vals):
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
- return self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
+ result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
+ return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
@@ -7096,9 +7097,11 @@ def stack(self, level=-1, dropna=True):
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
- return stack_multiple(self, level, dropna=dropna)
+ result = stack_multiple(self, level, dropna=dropna)
else:
- return stack(self, level, dropna=dropna)
+ result = stack(self, level, dropna=dropna)
+
+ return result.__finalize__(self, method="stack")
def explode(
self, column: Union[str, Tuple], ignore_index: bool = False
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 9d1f52e03b3d1..9d2e5cbcc7b58 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -115,10 +115,7 @@
(pd.DataFrame, frame_data, operator.methodcaller("notnull")),
(pd.DataFrame, frame_data, operator.methodcaller("dropna")),
(pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")),
- pytest.param(
- (pd.DataFrame, frame_data, operator.methodcaller("duplicated")),
- marks=not_implemented_mark,
- ),
+ (pd.DataFrame, frame_data, operator.methodcaller("duplicated")),
(pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")),
(pd.DataFrame, frame_data, operator.methodcaller("sort_index")),
(pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")),
@@ -169,10 +166,7 @@
),
marks=not_implemented_mark,
),
- pytest.param(
- (pd.DataFrame, frame_data, operator.methodcaller("stack")),
- marks=not_implemented_mark,
- ),
+ (pd.DataFrame, frame_data, operator.methodcaller("stack")),
pytest.param(
(pd.DataFrame, frame_data, operator.methodcaller("explode", "A")),
marks=not_implemented_mark,
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The goal here is to make progress on some of the more uncontroversial/easier content of #28283. I'll split off into another PR if this gets too big | https://api.github.com/repos/pandas-dev/pandas/pulls/37186 | 2020-10-17T02:52:42Z | 2020-10-20T18:01:26Z | 2020-10-20T18:01:26Z | 2020-10-20T18:01:30Z |
DOC: Add example to Series.divmod | diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index 08b7b0e89ea5f..06ed321327e06 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -26,6 +26,8 @@ def make_flex_doc(op_name: str, typ: str) -> str:
assert op_desc_op is not None # for mypy
if op_name.startswith("r"):
equiv = "other " + op_desc_op + " " + typ
+ elif op_name == "divmod":
+ equiv = f"{op_name}({typ}, other)"
else:
equiv = typ + " " + op_desc_op + " other"
@@ -162,6 +164,25 @@ def make_flex_doc(op_name: str, typ: str) -> str:
"""
)
+_divmod_example_SERIES = (
+ _common_examples_algebra_SERIES
+ + """
+>>> a.divmod(b, fill_value=0)
+(a 1.0
+ b NaN
+ c NaN
+ d 0.0
+ e NaN
+ dtype: float64,
+ a 0.0
+ b NaN
+ c NaN
+ d 0.0
+ e NaN
+ dtype: float64)
+"""
+)
+
_mod_example_SERIES = (
_common_examples_algebra_SERIES
+ """
@@ -332,7 +353,7 @@ def make_flex_doc(op_name: str, typ: str) -> str:
"op": "divmod",
"desc": "Integer division and modulo",
"reverse": "rdivmod",
- "series_examples": None,
+ "series_examples": _divmod_example_SERIES,
"series_returns": _returns_tuple,
"df_examples": None,
},
|
- [x] closes #24589
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
-----
[Current doc](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.divmod.html); Rendered doc:

| https://api.github.com/repos/pandas-dev/pandas/pulls/37185 | 2020-10-17T02:42:14Z | 2020-10-17T13:38:54Z | 2020-10-17T13:38:54Z | 2020-10-17T13:39:56Z |
CLN: core/dtypes/cast.py::maybe_casted_values | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e550309461de4..925a918910703 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -491,37 +491,23 @@ def maybe_casted_values(
if codes is not None:
mask: np.ndarray = codes == -1
- # we can have situations where the whole mask is -1,
- # meaning there is nothing found in codes, so make all nan's
if mask.size > 0 and mask.all():
+ # we can have situations where the whole mask is -1,
+ # meaning there is nothing found in codes, so make all nan's
+
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
+
else:
values = values.take(codes)
- # TODO(https://github.com/pandas-dev/pandas/issues/24206)
- # Push this into maybe_upcast_putmask?
- # We can't pass EAs there right now. Looks a bit
- # complicated.
- # So we unbox the ndarray_values, op, re-box.
- values_type = type(values)
- values_dtype = values.dtype
-
- from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
-
- if isinstance(values, DatetimeLikeArrayMixin):
- values = values._data # TODO: can we de-kludge yet?
-
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
- if issubclass(values_type, DatetimeLikeArrayMixin):
- values = values_type(values, dtype=values_dtype)
-
return values
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref https://github.com/pandas-dev/pandas/pull/36985#issuecomment-705804862 | https://api.github.com/repos/pandas-dev/pandas/pulls/37183 | 2020-10-17T01:54:14Z | 2020-10-17T13:35:54Z | 2020-10-17T13:35:54Z | 2020-10-17T13:36:00Z |
Tests for where() with categorical | diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index c4a2cb90f7090..cbb34d595eb9b 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -452,3 +452,15 @@ def test_where_empty_series_and_empty_cond_having_non_bool_dtypes():
ser = Series([], dtype=float)
result = ser.where([])
tm.assert_series_equal(result, ser)
+
+
+@pytest.mark.parametrize("klass", [Series, pd.DataFrame])
+def test_where_categorical(klass):
+ # https://github.com/pandas-dev/pandas/issues/18888
+ exp = klass(
+ pd.Categorical(["A", "A", "B", "B", np.nan], categories=["A", "B", "C"]),
+ dtype="category",
+ )
+ df = klass(["A", "A", "B", "B", "C"], dtype="category")
+ res = df.where(df != "C")
+ tm.assert_equal(exp, res)
| - [X] closes #18888
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Hey everyone,
I wrote some tests for issue #18888. I don't know exactly if the format is correct but if any change is needed I'm happy to make it right.
Which 'whatsnew entry' should I report for both new tests?
Also, if you think this is a decent contribution and if there's no problem with doing that, it would be nice to tag this PR with hacktoberfest-accepted label :) | https://api.github.com/repos/pandas-dev/pandas/pulls/37182 | 2020-10-17T01:09:43Z | 2020-10-20T01:37:29Z | 2020-10-20T01:37:29Z | 2020-10-20T01:37:40Z |
BUG: Fix isin with read-only target | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6892fb62028c9..d5b6abd9f9de4 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -28,6 +28,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
- Bug causing ``groupby(...).sum()`` and similar to not preserve metadata (:issue:`29442`)
+- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` raising a ``ValueError`` when the target was read-only (:issue:`37174`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index fcd081f563f92..4a466ada765ca 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -208,7 +208,7 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'):
{{if dtype == 'object'}}
def ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values):
{{else}}
-def ismember_{{dtype}}(const {{c_type}}[:] arr, {{c_type}}[:] values):
+def ismember_{{dtype}}(const {{c_type}}[:] arr, const {{c_type}}[:] values):
{{endif}}
"""
Return boolean of values in arr on an
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
index 35d45bd00131b..29a3a0106c56c 100644
--- a/pandas/tests/frame/methods/test_isin.py
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -204,3 +204,12 @@ def test_isin_category_frame(self, values):
result = df.isin(values)
tm.assert_frame_equal(result, expected)
+
+ def test_isin_read_only(self):
+ # https://github.com/pandas-dev/pandas/issues/37174
+ arr = np.array([1, 2, 3])
+ arr.setflags(write=False)
+ df = DataFrame([1, 2, 3])
+ result = df.isin(arr)
+ expected = DataFrame([True, True, True])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index 3836c1d56bf87..62766c692f4df 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -80,3 +80,12 @@ def test_isin_empty(self, empty):
result = s.isin(empty)
tm.assert_series_equal(expected, result)
+
+ def test_isin_read_only(self):
+ # https://github.com/pandas-dev/pandas/issues/37174
+ arr = np.array([1, 2, 3])
+ arr.setflags(write=False)
+ s = Series([1, 2, 3])
+ result = s.isin(arr)
+ expected = Series([True, True, True])
+ tm.assert_series_equal(result, expected)
| - [x] closes #37174
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37181 | 2020-10-17T00:02:58Z | 2020-10-17T17:04:02Z | 2020-10-17T17:04:01Z | 2020-10-18T14:04:50Z |
BUG: use cls instead of MultiIndex in MultiIndex classmethods | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d012d5704f716..4ba7dc58a3527 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -463,7 +463,7 @@ def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> "MultiInde
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
- return MultiIndex(
+ return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
@@ -534,7 +534,7 @@ def from_tuples(
else:
arrays = zip(*tuples)
- return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
+ return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=lib.no_default):
@@ -593,7 +593,7 @@ def from_product(cls, iterables, sortorder=None, names=lib.no_default):
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
- return MultiIndex(levels, codes, sortorder=sortorder, names=names)
+ return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
| - [x] closes #11267
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This doesn't go through all the places where MultiIndex is hard-coded, just the classmethods where in `cls` should be used as a matter of principle. | https://api.github.com/repos/pandas-dev/pandas/pulls/37180 | 2020-10-16T23:08:53Z | 2020-10-17T01:51:37Z | 2020-10-17T01:51:36Z | 2020-10-17T01:51:49Z |
REF: Simplify creation of rolling window indexers internally | diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index aa1dfe8567c15..0505913aaf8cc 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -8,7 +8,7 @@
from pandas.util._decorators import Appender, Substitution, doc
from pandas.core.window.common import _doc_template, _shared_docs
-from pandas.core.window.indexers import ExpandingIndexer, GroupbyIndexer
+from pandas.core.window.indexers import BaseIndexer, ExpandingIndexer, GroupbyIndexer
from pandas.core.window.rolling import BaseWindowGroupby, RollingAndExpandingMixin
@@ -68,11 +68,17 @@ def __init__(self, obj, min_periods=1, center=None, axis=0, **kwargs):
def _constructor(self):
return Expanding
- def _get_window(
+ def _get_window_indexer(self) -> BaseIndexer:
+ """
+ Return an indexer class that will compute the window start and end bounds
+ """
+ return ExpandingIndexer()
+
+ def _get_cov_corr_window(
self, other: Optional[Union[np.ndarray, FrameOrSeries]] = None, **kwargs
) -> int:
"""
- Get the window length over which to perform some operation.
+ Get the window length over which to perform cov and corr operations.
Parameters
----------
@@ -275,15 +281,10 @@ class ExpandingGroupby(BaseWindowGroupby, Expanding):
Provide a expanding groupby implementation.
"""
- def _get_window_indexer(self, window: int) -> GroupbyIndexer:
+ def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
- Parameters
- ----------
- window : int
- window size for FixedWindowIndexer (unused)
-
Returns
-------
GroupbyIndexer
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 452e1c252183f..1fcc47931e882 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -213,9 +213,9 @@ def __getattr__(self, attr: str):
def _dir_additions(self):
return self.obj._dir_additions()
- def _get_window(
+ def _get_cov_corr_window(
self, other: Optional[Union[np.ndarray, FrameOrSeries]] = None
- ) -> int:
+ ) -> Optional[Union[int, timedelta, BaseOffset, BaseIndexer]]:
"""
Return window length.
@@ -228,8 +228,6 @@ def _get_window(
-------
window : int
"""
- if isinstance(self.window, BaseIndexer):
- return self.min_periods or 0
return self.window
@property
@@ -249,11 +247,10 @@ def __repr__(self) -> str:
return f"{self._window_type} [{attrs}]"
def __iter__(self):
- window = self._get_window()
obj = self._create_data(self._selected_obj)
- index = self._get_window_indexer(window=window)
+ indexer = self._get_window_indexer()
- start, end = index.get_window_bounds(
+ start, end = indexer.get_window_bounds(
num_values=len(obj),
min_periods=self.min_periods,
center=self.center,
@@ -340,15 +337,17 @@ def _get_roll_func(self, func_name: str) -> Callable[..., Any]:
)
return window_func
- def _get_window_indexer(self, window: int) -> BaseIndexer:
+ def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
if isinstance(self.window, BaseIndexer):
return self.window
if self.is_freq_type:
- return VariableWindowIndexer(index_array=self._on.asi8, window_size=window)
- return FixedWindowIndexer(window_size=window)
+ return VariableWindowIndexer(
+ index_array=self._on.asi8, window_size=self.window
+ )
+ return FixedWindowIndexer(window_size=self.window)
def _apply_series(
self, homogeneous_func: Callable[..., ArrayLike], name: Optional[str] = None
@@ -428,8 +427,7 @@ def _apply(
-------
y : type of input
"""
- window = self._get_window()
- window_indexer = self._get_window_indexer(window)
+ window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
@@ -1750,14 +1748,10 @@ def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
# GH 32865. We leverage rolling.mean, so we pass
# to the rolling constructors the data used when constructing self:
# window width, frequency data, or a BaseIndexer subclass
- if isinstance(self.window, BaseIndexer):
- window = self.window
- else:
- # GH 16058: offset window
- if self.is_freq_type:
- window = self.win_freq
- else:
- window = self._get_window(other)
+ # GH 16058: offset window
+ window = (
+ self._get_cov_corr_window(other) if not self.is_freq_type else self.win_freq
+ )
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
@@ -1899,10 +1893,10 @@ def corr(self, other=None, pairwise=None, **kwargs):
# GH 32865. We leverage rolling.cov and rolling.std here, so we pass
# to the rolling constructors the data used when constructing self:
# window width, frequency data, or a BaseIndexer subclass
- if isinstance(self.window, BaseIndexer):
- window = self.window
- else:
- window = self._get_window(other) if not self.is_freq_type else self.win_freq
+ # GH 16058: offset window
+ window = (
+ self._get_cov_corr_window(other) if not self.is_freq_type else self.win_freq
+ )
def _get_corr(a, b):
a = a.rolling(
@@ -2208,15 +2202,10 @@ class RollingGroupby(BaseWindowGroupby, Rolling):
Provide a rolling groupby implementation.
"""
- def _get_window_indexer(self, window: int) -> GroupbyIndexer:
+ def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
- Parameters
- ----------
- window : int
- window size for FixedWindowIndexer
-
Returns
-------
GroupbyIndexer
@@ -2224,12 +2213,14 @@ def _get_window_indexer(self, window: int) -> GroupbyIndexer:
rolling_indexer: Type[BaseIndexer]
indexer_kwargs: Optional[Dict[str, Any]] = None
index_array = self._on.asi8
+ window = self.window
if isinstance(self.window, BaseIndexer):
rolling_indexer = type(self.window)
indexer_kwargs = self.window.__dict__
assert isinstance(indexer_kwargs, dict) # for mypy
# We'll be using the index of each group later
indexer_kwargs.pop("index_array", None)
+ window = 0
elif self.is_freq_type:
rolling_indexer = VariableWindowIndexer
else:
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/37177 | 2020-10-16T22:15:57Z | 2020-10-17T01:00:21Z | 2020-10-17T01:00:21Z | 2020-10-17T01:00:24Z |
BUG: infer_dtype with decimal/complex | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 922dcd7e74aa0..f4caafb3a9fe7 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1414,10 +1414,12 @@ def infer_dtype(value: object, skipna: bool = True) -> str:
return "time"
elif is_decimal(val):
- return "decimal"
+ if is_decimal_array(values):
+ return "decimal"
elif is_complex(val):
- return "complex"
+ if is_complex_array(values):
+ return "complex"
elif util.is_float_object(val):
if is_float_array(values):
@@ -1702,6 +1704,34 @@ cpdef bint is_float_array(ndarray values):
return validator.validate(values)
+cdef class ComplexValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return (
+ util.is_complex_object(value)
+ or (util.is_float_object(value) and is_nan(value))
+ )
+
+ cdef inline bint is_array_typed(self) except -1:
+ return issubclass(self.dtype.type, np.complexfloating)
+
+
+cdef bint is_complex_array(ndarray values):
+ cdef:
+ ComplexValidator validator = ComplexValidator(len(values), values.dtype)
+ return validator.validate(values)
+
+
+cdef class DecimalValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return is_decimal(value)
+
+
+cdef bint is_decimal_array(ndarray values):
+ cdef:
+ DecimalValidator validator = DecimalValidator(len(values), values.dtype)
+ return validator.validate(values)
+
+
cdef class StringValidator(Validator):
cdef inline bint is_value_typed(self, object value) except -1:
return isinstance(value, str)
@@ -2546,8 +2576,6 @@ def fast_multiget(dict mapping, ndarray keys, default=np.nan):
# kludge, for Series
return np.empty(0, dtype='f8')
- keys = getattr(keys, 'values', keys)
-
for i in range(n):
val = keys[i]
if val in mapping:
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index c6c54ccb357d5..7fa83eeac8400 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -709,6 +709,9 @@ def test_decimals(self):
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed"
+ result = lib.infer_dtype(arr[::-1], skipna=True)
+ assert result == "mixed"
+
arr = np.array([Decimal(1), Decimal("NaN"), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
@@ -729,6 +732,9 @@ def test_complex(self, skipna):
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "mixed"
+ result = lib.infer_dtype(arr[::-1], skipna=skipna)
+ assert result == "mixed"
+
# gets cast to complex on array construction
arr = np.array([1, np.nan, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Does this need a whatsnew? | https://api.github.com/repos/pandas-dev/pandas/pulls/37176 | 2020-10-16T21:59:49Z | 2020-10-18T14:58:45Z | 2020-10-18T14:58:45Z | 2020-10-18T17:39:28Z |
BUG: fix tab completion | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index a963442ecda1c..66ab954bae288 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -492,6 +492,7 @@ Other
- Fixed metadata propagation in the :class:`Series.dt` and :class:`Series.str` accessors (:issue:`28283`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError``, from a bare ``Exception`` previously (:issue:`35744`)
+- Bug in ``accessor.DirNamesMixin``, where ``dir(obj)`` wouldn't show attributes defined on the instance (:issue:`37173`).
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 2caf1f75f3da1..41212fd49113d 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -4,7 +4,7 @@
that can be mixed into or pinned onto other pandas classes.
"""
-from typing import FrozenSet, Set
+from typing import FrozenSet, List, Set
import warnings
from pandas.util._decorators import doc
@@ -12,15 +12,15 @@
class DirNamesMixin:
_accessors: Set[str] = set()
- _deprecations: FrozenSet[str] = frozenset()
+ _hidden_attrs: FrozenSet[str] = frozenset()
- def _dir_deletions(self):
+ def _dir_deletions(self) -> Set[str]:
"""
Delete unwanted __dir__ for this object.
"""
- return self._accessors | self._deprecations
+ return self._accessors | self._hidden_attrs
- def _dir_additions(self):
+ def _dir_additions(self) -> Set[str]:
"""
Add additional __dir__ for this object.
"""
@@ -33,7 +33,7 @@ def _dir_additions(self):
pass
return rv
- def __dir__(self):
+ def __dir__(self) -> List[str]:
"""
Provide method name lookup and completion.
@@ -41,7 +41,7 @@ def __dir__(self):
-----
Only provide 'public' methods.
"""
- rv = set(dir(type(self)))
+ rv = set(super().__dir__())
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 081a363ce03c6..62dd3f89770cd 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -288,7 +288,7 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMi
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = PandasObject._deprecations | frozenset(["tolist"])
+ _hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"])
_typ = "categorical"
_can_hold_na = True
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 2e62fade93dcb..4346e02069667 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -270,7 +270,7 @@ class SparseArray(OpsMixin, PandasObject, ExtensionArray):
"""
_subtyp = "sparse_array" # register ABCSparseArray
- _deprecations = PandasObject._deprecations | frozenset(["get_values"])
+ _hidden_attrs = PandasObject._hidden_attrs | frozenset(["get_values"])
_sparse_index: SparseIndex
def __init__(
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 6af537dcd149a..55cb671278050 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -314,7 +314,7 @@ class IndexOpsMixin(OpsMixin):
# ndarray compatibility
__array_priority__ = 1000
- _deprecations: FrozenSet[str] = frozenset(
+ _hidden_attrs: FrozenSet[str] = frozenset(
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 539275c7ff617..2efad0210778c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -420,7 +420,7 @@ def _constructor(self) -> Type[DataFrame]:
return DataFrame
_constructor_sliced: Type[Series] = Series
- _deprecations: FrozenSet[str] = NDFrame._deprecations | frozenset([])
+ _hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a8586e3b90083..8b2d4ee485202 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -197,7 +197,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
- _deprecations: FrozenSet[str] = frozenset(["get_values", "tshift"])
+ _hidden_attrs: FrozenSet[str] = frozenset(["get_values", "tshift"])
_metadata: List[str] = []
_is_copy = None
_mgr: BlockManager
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 4ab40e25db84e..b26b48910f59b 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -489,6 +489,21 @@ def group_selection_context(groupby: "BaseGroupBy") -> Iterator["BaseGroupBy"]:
class BaseGroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection: Optional[IndexLabel] = None
_apply_allowlist: FrozenSet[str] = frozenset()
+ _hidden_attrs = PandasObject._hidden_attrs | {
+ "as_index",
+ "axis",
+ "dropna",
+ "exclusions",
+ "grouper",
+ "group_keys",
+ "keys",
+ "level",
+ "mutated",
+ "obj",
+ "observed",
+ "sort",
+ "squeeze",
+ }
def __init__(
self,
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index b9b2c4b07d37a..1dd85d072f253 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -28,6 +28,11 @@
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
+ _hidden_attrs = PandasObject._hidden_attrs | {
+ "orig",
+ "name",
+ }
+
def __init__(self, data: "Series", orig):
if not isinstance(data, ABCSeries):
raise TypeError(
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6b71e455782e3..4bcc237c996ad 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -193,9 +193,9 @@ class Index(IndexOpsMixin, PandasObject):
"""
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations: FrozenSet[str] = (
- PandasObject._deprecations
- | IndexOpsMixin._deprecations
+ _hidden_attrs: FrozenSet[str] = (
+ PandasObject._hidden_attrs
+ | IndexOpsMixin._hidden_attrs
| frozenset(["contains", "set_value"])
)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d012d5704f716..f7d8bb3601ec5 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -258,7 +258,7 @@ class MultiIndex(Index):
of the mentioned helper methods.
"""
- _deprecations = Index._deprecations | frozenset()
+ _hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 55aa32dd028ef..c9b307d6c4380 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -181,9 +181,9 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata: List[str] = ["name"]
_internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
- _deprecations = (
- base.IndexOpsMixin._deprecations
- | generic.NDFrame._deprecations
+ _hidden_attrs = (
+ base.IndexOpsMixin._hidden_attrs
+ | generic.NDFrame._hidden_attrs
| frozenset(["compress", "ptp"])
)
diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py
index d839936f731a3..6e224245076ee 100644
--- a/pandas/tests/test_register_accessor.py
+++ b/pandas/tests/test_register_accessor.py
@@ -4,6 +4,22 @@
import pandas as pd
import pandas._testing as tm
+from pandas.core import accessor
+
+
+def test_dirname_mixin():
+ # GH37173
+
+ class X(accessor.DirNamesMixin):
+ x = 1
+ y: int
+
+ def __init__(self):
+ self.z = 3
+
+ result = [attr_name for attr_name in dir(X()) if not attr_name.startswith("_")]
+
+ assert result == ["x", "z"]
@contextlib.contextmanager
| Currently tab completion doesn't work on attributes defined on the instance, only ones defined on the class. This fixes that.
Example:
```python
>>> class MyCls(pd.core.accessor.DirNamesMixin):
... x = 1
...
... def __init__(self):
... self.y = 2
...
>>> x = MyCls()
>>> "x" in dir(x)
True # master and this PR
>>> "y" in dir(x)
False # master
True # this PR
```
Also some type fixups. | https://api.github.com/repos/pandas-dev/pandas/pulls/37173 | 2020-10-16T21:40:31Z | 2020-10-26T13:27:02Z | 2020-10-26T13:27:02Z | 2020-11-28T18:41:42Z |
CLN: ensure we pass correct type to DTI/TDI shallow_copy | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index d2005d46bbbf1..4e07a3e0c6df8 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -696,6 +696,8 @@ def factorize(
# return original tenor
if isinstance(original, ABCIndexClass):
+ if original.dtype.kind in ["m", "M"] and isinstance(uniques, np.ndarray):
+ uniques = type(original._data)._simple_new(uniques, dtype=original.dtype)
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
@@ -1650,7 +1652,8 @@ def take_nd(
"""
mask_info = None
- if is_extension_array_dtype(arr):
+ if isinstance(arr, ABCExtensionArray):
+ # Check for EA to catch DatetimeArray, TimedeltaArray
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
arr = extract_array(arr)
@@ -2043,7 +2046,7 @@ def safe_sort(
"Only list-like objects are allowed to be passed to safe_sort as values"
)
- if not isinstance(values, np.ndarray) and not is_extension_array_dtype(values):
+ if not isinstance(values, (np.ndarray, ABCExtensionArray)):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 87dd15d5b142b..2ebf2389823e9 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2664,6 +2664,11 @@ def _union(self, other, sort):
return self._shallow_copy(result)
def _wrap_setop_result(self, other, result):
+ if isinstance(self, (ABCDatetimeIndex, ABCTimedeltaIndex)) and isinstance(
+ result, np.ndarray
+ ):
+ result = type(self._data)._simple_new(result, dtype=self.dtype)
+
name = get_op_result_name(self, other)
if isinstance(result, Index):
if result.name != name:
@@ -2740,10 +2745,10 @@ def intersection(self, other, sort=False):
indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
- result = other.take(indexer)
+ result = other.take(indexer)._values
if sort is None:
- result = algos.safe_sort(result.values)
+ result = algos.safe_sort(result)
return self._wrap_setop_result(other, result)
@@ -2800,7 +2805,7 @@ def difference(self, other, sort=None):
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
- the_diff = this.values.take(label_diff)
+ the_diff = this._values.take(label_diff)
if sort is None:
try:
the_diff = algos.safe_sort(the_diff)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 3845a601000a0..017dc6527944a 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -696,9 +696,6 @@ def _shallow_copy(self, values=None, name: Label = lib.no_default):
name = self.name if name is lib.no_default else name
if values is not None:
- # TODO: We would rather not get here
- if isinstance(values, np.ndarray):
- values = type(self._data)(values, dtype=self.dtype)
return self._simple_new(values, name=name)
result = self._simple_new(self._data, name=name)
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 94b10572fb5e1..6a681ede8ff42 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -320,6 +320,10 @@ def test_get_unique_index(self, index):
vals[0] = np.nan
vals_unique = vals[:2]
+ if index.dtype.kind in ["m", "M"]:
+ # i.e. needs_i8_conversion but not period_dtype, as above
+ vals = type(index._data)._simple_new(vals, dtype=index.dtype)
+ vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37171 | 2020-10-16T21:10:21Z | 2020-10-17T13:41:27Z | 2020-10-17T13:41:27Z | 2020-10-22T12:02:57Z |
BUG: MultiIndex comparison with tuple #21517 | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index d8961f5fdb959..874caee23dae6 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -394,6 +394,7 @@ Numeric
- Bug in :meth:`DataFrame.__rmatmul__` error handling reporting transposed shapes (:issue:`21581`)
- Bug in :class:`Series` flex arithmetic methods where the result when operating with a ``list``, ``tuple`` or ``np.ndarray`` would have an incorrect name (:issue:`36760`)
- Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`)
+- Bug in :class:`MultiIndex` comparison with tuple incorrectly treating tuple as array-like (:issue:`21517`)
- Bug in :meth:`DataFrame.diff` with ``datetime64`` dtypes including ``NaT`` values failing to fill ``NaT`` results correctly (:issue:`32441`)
- Bug in :class:`DataFrame` arithmetic ops incorrectly accepting keyword arguments (:issue:`36843`)
- Bug in :class:`IntervalArray` comparisons with :class:`Series` not returning :class:`Series` (:issue:`36908`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2ebf2389823e9..3713eb6da60ac 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -65,7 +65,6 @@
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import (
- ABCCategorical,
ABCDatetimeIndex,
ABCMultiIndex,
ABCPandasArray,
@@ -83,6 +82,7 @@
from pandas.core.arrays.datetimes import tz_to_dtype, validate_tz_from_dtype
from pandas.core.base import IndexOpsMixin, PandasObject
import pandas.core.common as com
+from pandas.core.construction import extract_array
from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
from pandas.core.ops import get_op_result_name
@@ -5376,11 +5376,13 @@ def _cmp_method(self, other, op):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
- if is_object_dtype(self.dtype) and isinstance(other, ABCCategorical):
- left = type(other)(self._values, dtype=other.dtype)
- return op(left, other)
- elif is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
- # e.g. PeriodArray
+ if not isinstance(other, ABCMultiIndex):
+ other = extract_array(other, extract_numpy=True)
+ else:
+ other = np.asarray(other)
+
+ if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
+ # e.g. PeriodArray, Categorical
with np.errstate(all="ignore"):
result = op(self._values, other)
@@ -5395,7 +5397,7 @@ def _cmp_method(self, other, op):
else:
with np.errstate(all="ignore"):
- result = ops.comparison_op(self._values, np.asarray(other), op)
+ result = ops.comparison_op(self._values, other, op)
return result
diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py
index 184cedea7dc5c..fec1c0e44cd9f 100644
--- a/pandas/tests/indexes/multi/test_equivalence.py
+++ b/pandas/tests/indexes/multi/test_equivalence.py
@@ -84,6 +84,46 @@ def test_equals_op(idx):
tm.assert_series_equal(series_a == item, Series(expected3))
+def test_compare_tuple():
+ # GH#21517
+ mi = MultiIndex.from_product([[1, 2]] * 2)
+
+ all_false = np.array([False, False, False, False])
+
+ result = mi == mi[0]
+ expected = np.array([True, False, False, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = mi != mi[0]
+ tm.assert_numpy_array_equal(result, ~expected)
+
+ result = mi < mi[0]
+ tm.assert_numpy_array_equal(result, all_false)
+
+ result = mi <= mi[0]
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = mi > mi[0]
+ tm.assert_numpy_array_equal(result, ~expected)
+
+ result = mi >= mi[0]
+ tm.assert_numpy_array_equal(result, ~all_false)
+
+
+def test_compare_tuple_strs():
+ # GH#34180
+
+ mi = MultiIndex.from_tuples([("a", "b"), ("b", "c"), ("c", "a")])
+
+ result = mi == ("c", "a")
+ expected = np.array([False, False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = mi == ("c",)
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+
def test_equals_multi(idx):
assert idx.equals(idx)
assert not idx.equals(idx.values)
| - [x] closes #21517
- [x] closes #34180
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37170 | 2020-10-16T20:54:17Z | 2020-10-18T15:01:55Z | 2020-10-18T15:01:55Z | 2020-10-18T17:35:26Z |
NIT: Format text.rst doc code | diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst
index 9dd4fb68ae26a..9b1c9b8d04270 100644
--- a/doc/source/user_guide/text.rst
+++ b/doc/source/user_guide/text.rst
@@ -302,10 +302,10 @@ positional argument (a regex object) and return a string.
return m.group(0)[::-1]
- pd.Series(
- ["foo 123", "bar baz", np.nan],
- dtype="string"
- ).str.replace(pat, repl, regex=True)
+ pd.Series(["foo 123", "bar baz", np.nan], dtype="string").str.replace(
+ pat, repl, regex=True
+ )
+
# Using regex groups
pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
| Minor formatting mistake I made in another PR | https://api.github.com/repos/pandas-dev/pandas/pulls/37168 | 2020-10-16T20:01:58Z | 2020-10-16T23:47:09Z | 2020-10-16T23:47:09Z | 2020-10-17T00:03:23Z |
BUG: Bug in quantile() and median() returned wrong result for non monotonic window borders | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index f1f24ab7a101b..4fbad4df3e78b 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -499,6 +499,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.rolling` returned wrong values with timeaware window containing ``NaN``. Raises ``ValueError`` because windows are not monotonic now (:issue:`34617`)
- Bug in :meth:`Rolling.__iter__` where a ``ValueError`` was not raised when ``min_periods`` was larger than ``window`` (:issue:`37156`)
- Using :meth:`Rolling.var()` instead of :meth:`Rolling.std()` avoids numerical issues for :meth:`Rolling.corr()` when :meth:`Rolling.var()` is still within floating point precision while :meth:`Rolling.std()` is not (:issue:`31286`)
+- Bug in :meth:`Rolling.median` and :meth:`Rolling.quantile` returned wrong values for :class:`BaseIndexer` subclasses with non-monotonic starting or ending points for windows (:issue:`37153`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 2c315ca13e563..b2dbf7802e6f0 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -686,6 +686,11 @@ def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start,
int64_t nobs = 0, N = len(values), s, e, win
int midpoint
ndarray[float64_t] output
+ bint is_monotonic_increasing_bounds
+
+ is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
+ start, end
+ )
# we use the Fixed/Variable Indexer here as the
# actual skiplist ops outweigh any window computation costs
@@ -705,7 +710,7 @@ def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start,
s = start[i]
e = end[i]
- if i == 0:
+ if i == 0 or not is_monotonic_increasing_bounds:
# setup
for j in range(s, e):
@@ -733,7 +738,6 @@ def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start,
if notnan(val):
skiplist_remove(sl, val)
nobs -= 1
-
if nobs >= minp:
midpoint = <int>(nobs / 2)
if nobs % 2:
@@ -748,6 +752,10 @@ def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start,
output[i] = res
+ if not is_monotonic_increasing_bounds:
+ nobs = 0
+ sl = skiplist_init(<int>win)
+
skiplist_destroy(sl)
if err:
raise MemoryError("skiplist_insert failed")
@@ -935,7 +943,7 @@ def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start,
cdef:
float64_t val, prev, midpoint, idx_with_fraction
skiplist_t *skiplist
- int64_t nobs = 0, i, j, s, e, N = len(values)
+ int64_t nobs = 0, i, j, s, e, N = len(values), win
Py_ssize_t idx
ndarray[float64_t] output
float64_t vlow, vhigh
@@ -950,6 +958,9 @@ def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start,
except KeyError:
raise ValueError(f"Interpolation '{interpolation}' is not supported")
+ is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
+ start, end
+ )
# we use the Fixed/Variable Indexer here as the
# actual skiplist ops outweigh any window computation costs
output = np.empty(N, dtype=float)
@@ -967,7 +978,10 @@ def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start,
s = start[i]
e = end[i]
- if i == 0:
+ if i == 0 or not is_monotonic_increasing_bounds:
+ if not is_monotonic_increasing_bounds:
+ nobs = 0
+ skiplist = skiplist_init(<int>win)
# setup
for j in range(s, e):
@@ -977,7 +991,6 @@ def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start,
skiplist_insert(skiplist, val)
else:
-
# calculate adds
for j in range(end[i - 1], e):
val = values[j]
@@ -991,7 +1004,6 @@ def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start,
if notnan(val):
skiplist_remove(skiplist, val)
nobs -= 1
-
if nobs >= minp:
if nobs == 1:
# Single value in skip list
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 7f2d58effe1ae..1723330ec40e1 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -263,3 +263,31 @@ def test_fixed_forward_indexer_count():
result = df.rolling(window=indexer, min_periods=0).count()
expected = DataFrame({"b": [0.0, 0.0, 1.0, 1.0]})
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ ("end_value", "values"), [(1, [0.0, 1, 1, 3, 2]), (-1, [0.0, 1, 0, 3, 1])]
+)
+@pytest.mark.parametrize(("func", "args"), [("median", []), ("quantile", [0.5])])
+def test_indexer_quantile_sum(end_value, values, func, args):
+ # GH 37153
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ start = np.empty(num_values, dtype=np.int64)
+ end = np.empty(num_values, dtype=np.int64)
+ for i in range(num_values):
+ if self.use_expanding[i]:
+ start[i] = 0
+ end[i] = max(i + end_value, 1)
+ else:
+ start[i] = i
+ end[i] = i + self.window_size
+ return start, end
+
+ use_expanding = [True, False, True, False, True]
+ df = DataFrame({"values": range(5)})
+
+ indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
+ result = getattr(df.rolling(indexer), func)(*args)
+ expected = DataFrame({"values": values})
+ tm.assert_frame_equal(result, expected)
| - [x] closes #37153
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
cc @mroeschke
Values were not added to the window again if starting point was moved back and values were not removed from the window, when ending point was moved back
Should I create functions for the deletion and addition? | https://api.github.com/repos/pandas-dev/pandas/pulls/37166 | 2020-10-16T19:02:58Z | 2020-10-30T23:11:48Z | 2020-10-30T23:11:48Z | 2020-10-30T23:17:52Z |
ENH: DataFrame __divmod__, __rdivmod__ | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index dfbbb456f50b6..7a1ccb3fa9ac3 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -194,6 +194,7 @@ Other enhancements
- Added :meth:`Rolling.sem()` and :meth:`Expanding.sem()` to compute the standard error of mean (:issue:`26476`).
- :meth:`Rolling.var()` and :meth:`Rolling.std()` use Kahan summation and Welfords Method to avoid numerical issues (:issue:`37051`)
- :meth:`DataFrame.plot` now recognizes ``xlabel`` and ``ylabel`` arguments for plots of type ``scatter`` and ``hexbin`` (:issue:`37001`)
+- :class:`DataFrame` now supports ``divmod`` operation (:issue:`37165`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index aca626805a0e3..a017ecf53c6ad 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5957,6 +5957,18 @@ def _construct_result(self, result) -> DataFrame:
out.index = self.index
return out
+ def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:
+ # Naive implementation, room for optimization
+ div = self // other
+ mod = self - div * other
+ return div, mod
+
+ def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:
+ # Naive implementation, room for optimization
+ div = other // self
+ mod = other - div * self
+ return div, mod
+
# ----------------------------------------------------------------------
# Combination-Related
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 3e979aed0551f..90a3ed6d75393 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -1900,10 +1900,13 @@ def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
result = tdarr % three_days
tm.assert_equal(result, expected)
- if box_with_array is pd.DataFrame:
- pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
+ warn = None
+ if box_with_array is pd.DataFrame and isinstance(three_days, pd.DateOffset):
+ warn = PerformanceWarning
+
+ with tm.assert_produces_warning(warn):
+ result = divmod(tdarr, three_days)
- result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
@@ -1921,9 +1924,6 @@ def test_td64arr_mod_int(self, box_with_array):
with pytest.raises(TypeError, match=msg):
2 % tdarr
- if box_with_array is pd.DataFrame:
- pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
-
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
@@ -1939,9 +1939,6 @@ def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
result = three_days % tdarr
tm.assert_equal(result, expected)
- if box_with_array is pd.DataFrame:
- pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
-
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37165 | 2020-10-16T18:50:53Z | 2020-10-17T13:43:31Z | 2020-10-17T13:43:31Z | 2020-10-17T15:09:04Z |
ENH: __repr__ for 2D DTA/TDA | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 948ffdc1f7c01..4ac32b409883d 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -240,3 +240,24 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs):
else:
msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
raise TypeError(msg)
+
+ # ------------------------------------------------------------------------
+
+ def __repr__(self) -> str:
+ if self.ndim == 1:
+ return super().__repr__()
+
+ from pandas.io.formats.printing import format_object_summary
+
+ # the short repr has no trailing newline, while the truncated
+ # repr does. So we include a newline in our template, and strip
+ # any trailing newlines from format_object_summary
+ lines = [
+ format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
+ ", \n"
+ )
+ for x in self
+ ]
+ data = ",\n".join(lines)
+ class_name = f"<{type(self).__name__}>"
+ return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index b1b8b513320e9..4ce959f2b833f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -564,7 +564,8 @@ def __iter__(self):
tstamp : Timestamp
"""
if self.ndim > 1:
- return (self[n] for n in range(len(self)))
+ for i in range(len(self)):
+ yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 82cd54182a33d..adade2460e8d3 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -352,7 +352,8 @@ def astype(self, dtype, copy: bool = True):
def __iter__(self):
if self.ndim > 1:
- return (self[n] for n in range(len(self)))
+ for i in range(len(self)):
+ yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index a961cf14b2e5c..2aa2fbe59cbbf 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -328,11 +328,41 @@ def test_iter_2d(self, arr1d):
data2d = arr1d._data[:3, np.newaxis]
arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
result = list(arr2d)
+ assert len(result) == 3
for x in result:
assert isinstance(x, type(arr1d))
assert x.ndim == 1
assert x.dtype == arr1d.dtype
+ def test_repr_2d(self, arr1d):
+ data2d = arr1d._data[:3, np.newaxis]
+ arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
+
+ result = repr(arr2d)
+
+ if isinstance(arr2d, TimedeltaArray):
+ expected = (
+ f"<{type(arr2d).__name__}>\n"
+ "[\n"
+ f"['{arr1d[0]._repr_base()}'],\n"
+ f"['{arr1d[1]._repr_base()}'],\n"
+ f"['{arr1d[2]._repr_base()}']\n"
+ "]\n"
+ f"Shape: (3, 1), dtype: {arr1d.dtype}"
+ )
+ else:
+ expected = (
+ f"<{type(arr2d).__name__}>\n"
+ "[\n"
+ f"['{arr1d[0]}'],\n"
+ f"['{arr1d[1]}'],\n"
+ f"['{arr1d[2]}']\n"
+ "]\n"
+ f"Shape: (3, 1), dtype: {arr1d.dtype}"
+ )
+
+ assert result == expected
+
def test_setitem(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37164 | 2020-10-16T18:38:01Z | 2020-11-02T21:26:54Z | 2020-11-02T21:26:54Z | 2020-11-02T21:47:01Z |
DOC: move trellis under visualization.rst | diff --git a/doc/source/rplot.rst b/doc/source/rplot.rst
deleted file mode 100644
index 46b57cea2d9ed..0000000000000
--- a/doc/source/rplot.rst
+++ /dev/null
@@ -1,179 +0,0 @@
-.. currentmodule:: pandas
-.. _rplot:
-
-.. ipython:: python
- :suppress:
-
- import numpy as np
- np.random.seed(123456)
- from pandas import *
- options.display.max_rows=15
- import pandas.util.testing as tm
- randn = np.random.randn
- np.set_printoptions(precision=4, suppress=True)
- import matplotlib.pyplot as plt
- tips_data = read_csv('data/tips.csv')
- iris_data = read_csv('data/iris.data')
- from pandas import read_csv
- from pandas.tools.plotting import radviz
- import pandas.tools.rplot as rplot
- plt.close('all')
-
-**************************
-Trellis plotting interface
-**************************
-
-.. note::
-
- The tips data set can be downloaded `here
- <http://wesmckinney.com/files/tips.csv>`__. Once you download it execute
-
- .. code-block:: python
-
- from pandas import read_csv
- tips_data = read_csv('tips.csv')
-
- from the directory where you downloaded the file.
-
-We import the rplot API:
-
-.. ipython:: python
-
- import pandas.tools.rplot as rplot
-
---------
-Examples
---------
-
-RPlot is a flexible API for producing Trellis plots. These plots allow you to arrange data in a rectangular grid by values of certain attributes.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['sex', 'smoker']))
- plot.add(rplot.GeomHistogram())
-
- @savefig rplot1_tips.png
- plot.render(plt.gcf())
-
-In the example above, data from the tips data set is arranged by the attributes 'sex' and 'smoker'. Since both of those attributes can take on one of two values, the resulting grid has two columns and two rows. A histogram is displayed for each cell of the grid.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['sex', 'smoker']))
- plot.add(rplot.GeomDensity())
-
- @savefig rplot2_tips.png
- plot.render(plt.gcf())
-
-Example above is the same as previous except the plot is set to kernel density estimation. This shows how easy it is to have different plots for the same Trellis structure.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['sex', 'smoker']))
- plot.add(rplot.GeomScatter())
- plot.add(rplot.GeomPolyFit(degree=2))
-
- @savefig rplot3_tips.png
- plot.render(plt.gcf())
-
-The plot above shows that it is possible to have two or more plots for the same data displayed on the same Trellis grid cell.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['sex', 'smoker']))
- plot.add(rplot.GeomScatter())
- plot.add(rplot.GeomDensity2D())
-
- @savefig rplot4_tips.png
- plot.render(plt.gcf())
-
-Above is a similar plot but with 2D kernel density estimation plot superimposed.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['sex', '.']))
- plot.add(rplot.GeomHistogram())
-
- @savefig rplot5_tips.png
- plot.render(plt.gcf())
-
-It is possible to only use one attribute for grouping data. The example above only uses 'sex' attribute. If the second grouping attribute is not specified, the plots will be arranged in a column.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['.', 'smoker']))
- plot.add(rplot.GeomHistogram())
-
- @savefig rplot6_tips.png
- plot.render(plt.gcf())
-
-If the first grouping attribute is not specified the plots will be arranged in a row.
-
-.. ipython:: python
-
- plt.figure()
-
- plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
- plot.add(rplot.TrellisGrid(['.', 'smoker']))
- plot.add(rplot.GeomHistogram())
-
- plot = rplot.RPlot(tips_data, x='tip', y='total_bill')
- plot.add(rplot.TrellisGrid(['sex', 'smoker']))
- plot.add(rplot.GeomPoint(size=80.0, colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size'), alpha=1.0))
-
- @savefig rplot7_tips.png
- plot.render(plt.gcf())
-
-As shown above, scatter plots are also possible. Scatter plots allow you to map various data attributes to graphical properties of the plot. In the example above the colour and shape of the scatter plot graphical objects is mapped to 'day' and 'size' attributes respectively. You use scale objects to specify these mappings. The list of scale classes is given below with initialization arguments for quick reference.
-
-------
-Scales
-------
-
-::
-
- ScaleGradient(column, colour1, colour2)
-
-This one allows you to map an attribute (specified by parameter column) value to the colour of a graphical object. The larger the value of the attribute the closer the colour will be to colour2, the smaller the value, the closer it will be to colour1.
-
-::
-
- ScaleGradient2(column, colour1, colour2, colour3)
-
-The same as ScaleGradient but interpolates linearly between three colours instead of two.
-
-::
-
- ScaleSize(column, min_size, max_size, transform)
-
-Map attribute value to size of the graphical object. Parameter min_size (default 5.0) is the minimum size of the graphical object, max_size (default 100.0) is the maximum size and transform is a one argument function that will be used to transform the attribute value (defaults to lambda x: x).
-
-::
-
- ScaleShape(column)
-
-Map the shape of the object to attribute value. The attribute has to be categorical.
-
-::
-
- ScaleRandomColour(column)
-
-Assign a random colour to a value of categorical attribute specified by column.
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index e1c64e641e1e7..adbecbe688945 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1378,3 +1378,183 @@ when plotting a large number of points.
:suppress:
plt.close('all')
+
+
+.. _rplot:
+
+
+Trellis plotting interface
+--------------------------
+
+.. ipython:: python
+ :suppress:
+
+ import numpy as np
+ np.random.seed(123456)
+ from pandas import *
+ options.display.max_rows=15
+ import pandas.util.testing as tm
+ randn = np.random.randn
+ np.set_printoptions(precision=4, suppress=True)
+ import matplotlib.pyplot as plt
+ tips_data = read_csv('data/tips.csv')
+ iris_data = read_csv('data/iris.data')
+ from pandas import read_csv
+ from pandas.tools.plotting import radviz
+ import pandas.tools.rplot as rplot
+ plt.close('all')
+
+
+.. note::
+
+ The tips data set can be downloaded `here
+ <http://wesmckinney.com/files/tips.csv>`__. Once you download it execute
+
+ .. code-block:: python
+
+ from pandas import read_csv
+ tips_data = read_csv('tips.csv')
+
+ from the directory where you downloaded the file.
+
+We import the rplot API:
+
+.. ipython:: python
+
+ import pandas.tools.rplot as rplot
+
+Examples
+~~~~~~~~
+
+RPlot is a flexible API for producing Trellis plots. These plots allow you to arrange data in a rectangular grid by values of certain attributes.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['sex', 'smoker']))
+ plot.add(rplot.GeomHistogram())
+
+ @savefig rplot1_tips.png
+ plot.render(plt.gcf())
+
+In the example above, data from the tips data set is arranged by the attributes 'sex' and 'smoker'. Since both of those attributes can take on one of two values, the resulting grid has two columns and two rows. A histogram is displayed for each cell of the grid.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['sex', 'smoker']))
+ plot.add(rplot.GeomDensity())
+
+ @savefig rplot2_tips.png
+ plot.render(plt.gcf())
+
+Example above is the same as previous except the plot is set to kernel density estimation. This shows how easy it is to have different plots for the same Trellis structure.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['sex', 'smoker']))
+ plot.add(rplot.GeomScatter())
+ plot.add(rplot.GeomPolyFit(degree=2))
+
+ @savefig rplot3_tips.png
+ plot.render(plt.gcf())
+
+The plot above shows that it is possible to have two or more plots for the same data displayed on the same Trellis grid cell.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['sex', 'smoker']))
+ plot.add(rplot.GeomScatter())
+ plot.add(rplot.GeomDensity2D())
+
+ @savefig rplot4_tips.png
+ plot.render(plt.gcf())
+
+Above is a similar plot but with 2D kernel density estimation plot superimposed.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['sex', '.']))
+ plot.add(rplot.GeomHistogram())
+
+ @savefig rplot5_tips.png
+ plot.render(plt.gcf())
+
+It is possible to only use one attribute for grouping data. The example above only uses 'sex' attribute. If the second grouping attribute is not specified, the plots will be arranged in a column.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['.', 'smoker']))
+ plot.add(rplot.GeomHistogram())
+
+ @savefig rplot6_tips.png
+ plot.render(plt.gcf())
+
+If the first grouping attribute is not specified the plots will be arranged in a row.
+
+.. ipython:: python
+
+ plt.figure()
+
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
+ plot.add(rplot.TrellisGrid(['.', 'smoker']))
+ plot.add(rplot.GeomHistogram())
+
+ plot = rplot.RPlot(tips_data, x='tip', y='total_bill')
+ plot.add(rplot.TrellisGrid(['sex', 'smoker']))
+ plot.add(rplot.GeomPoint(size=80.0, colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size'), alpha=1.0))
+
+ @savefig rplot7_tips.png
+ plot.render(plt.gcf())
+
+As shown above, scatter plots are also possible. Scatter plots allow you to map various data attributes to graphical properties of the plot. In the example above the colour and shape of the scatter plot graphical objects is mapped to 'day' and 'size' attributes respectively. You use scale objects to specify these mappings. The list of scale classes is given below with initialization arguments for quick reference.
+
+
+Scales
+~~~~~~
+
+::
+
+ ScaleGradient(column, colour1, colour2)
+
+This one allows you to map an attribute (specified by parameter column) value to the colour of a graphical object. The larger the value of the attribute the closer the colour will be to colour2, the smaller the value, the closer it will be to colour1.
+
+::
+
+ ScaleGradient2(column, colour1, colour2, colour3)
+
+The same as ScaleGradient but interpolates linearly between three colours instead of two.
+
+::
+
+ ScaleSize(column, min_size, max_size, transform)
+
+Map attribute value to size of the graphical object. Parameter min_size (default 5.0) is the minimum size of the graphical object, max_size (default 100.0) is the maximum size and transform is a one argument function that will be used to transform the attribute value (defaults to lambda x: x).
+
+::
+
+ ScaleShape(column)
+
+Map the shape of the object to attribute value. The attribute has to be categorical.
+
+::
+
+ ScaleRandomColour(column)
+
+Assign a random colour to a value of categorical attribute specified by column.
| Closes https://github.com/pydata/pandas/issues/8351
Just moved it and changed a few heading levels. I could add a note about considering seaborn. I've never used rplot.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8353 | 2014-09-22T14:57:08Z | 2014-09-22T21:46:12Z | 2014-09-22T21:46:12Z | 2017-05-15T21:16:01Z |
ENH/BUG: allow timedelta resamples | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index c66cda58fa8a0..db55786ba0d1a 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -563,7 +563,7 @@ TimedeltaIndex/Scalar
We introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes.
This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a nice-API box for the type. See the :ref:`docs <timedeltas.timedeltas>`.
-(:issue:`3009`, :issue:`4533`, :issue:`8209`, :issue:`8187`, :issue:`8190`, :issue:`7869`, :issue:`7661`)
+(:issue:`3009`, :issue:`4533`, :issue:`8209`, :issue:`8187`, :issue:`8190`, :issue:`7869`, :issue:`7661`, :issue:`8345`)
.. warning::
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 01aff164d8384..aa72113cba475 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -5,6 +5,7 @@
from pandas.core.groupby import BinGrouper, Grouper
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.tseries.index import DatetimeIndex, date_range
+from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_nanoseconds
from pandas.tseries.period import PeriodIndex, period_range
import pandas.tseries.tools as tools
@@ -96,10 +97,12 @@ def resample(self, obj):
obj = self.obj.to_timestamp(how=self.convention)
self._set_grouper(obj)
rs = self._resample_timestamps()
+ elif isinstance(ax, TimedeltaIndex):
+ rs = self._resample_timestamps(kind='timedelta')
elif len(ax) == 0:
return self.obj
else: # pragma: no cover
- raise TypeError('Only valid with DatetimeIndex or PeriodIndex')
+ raise TypeError('Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex')
rs_axis = rs._get_axis(self.axis)
rs_axis.name = ax.name
@@ -109,13 +112,17 @@ def _get_grouper(self, obj):
self._set_grouper(obj)
return self._get_binner_for_resample()
- def _get_binner_for_resample(self):
+ def _get_binner_for_resample(self, kind=None):
# create the BinGrouper
# assume that self.set_grouper(obj) has already been called
ax = self.ax
- if self.kind is None or self.kind == 'timestamp':
+ if kind is None:
+ kind = self.kind
+ if kind is None or kind == 'timestamp':
self.binner, bins, binlabels = self._get_time_bins(ax)
+ elif kind == 'timedelta':
+ self.binner, bins, binlabels = self._get_time_delta_bins(ax)
else:
self.binner, bins, binlabels = self._get_time_period_bins(ax)
@@ -217,6 +224,25 @@ def _adjust_bin_edges(self, binner, ax_values):
return binner, bin_edges
+ def _get_time_delta_bins(self, ax):
+ if not isinstance(ax, TimedeltaIndex):
+ raise TypeError('axis must be a TimedeltaIndex, but got '
+ 'an instance of %r' % type(ax).__name__)
+
+ if not len(ax):
+ binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
+ return binner, [], labels
+
+ labels = binner = TimedeltaIndex(start=ax[0],
+ end=ax[-1],
+ freq=self.freq,
+ name=ax.name)
+
+ end_stamps = labels + 1
+ bins = ax.searchsorted(end_stamps, side='left')
+
+ return binner, bins, labels
+
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
@@ -242,11 +268,11 @@ def _get_time_period_bins(self, ax):
def _agg_method(self):
return self.how if self.how else _DEFAULT_METHOD
- def _resample_timestamps(self):
+ def _resample_timestamps(self, kind=None):
# assumes set_grouper(obj) already called
axlabels = self.ax
- self._get_binner_for_resample()
+ self._get_binner_for_resample(kind=kind)
grouper = self.grouper
binner = self.binner
obj = self.obj
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index f4a96f5defab0..bafba847257f0 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -146,7 +146,7 @@ def test_resample_how_callables(self):
data = np.arange(5, dtype=np.int64)
ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
df = pd.DataFrame({"A": data, "B": data}, index=ind)
-
+
def fn(x, a=1):
return str(type(x))
@@ -164,7 +164,18 @@ def __call__(self, x):
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
-
+
+ def test_resample_with_timedeltas(self):
+
+ expected = DataFrame({'A' : np.arange(1480)})
+ expected = expected.groupby(expected.index // 30).sum()
+ expected.index = pd.timedelta_range('0 days',freq='30T',periods=50)
+
+ df = DataFrame({'A' : np.arange(1480)},index=pd.to_timedelta(np.arange(1480),unit='T'))
+ result = df.resample('30T',how='sum')
+
+ assert_frame_equal(result, expected)
+
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 48d3f3a551055..02d25bc71be58 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -265,6 +265,10 @@ def test_timedelta_range(self):
result = timedelta_range('1 days, 00:00:02',periods=5,freq='2D')
tm.assert_index_equal(result, expected)
+ expected = to_timedelta(np.arange(50),unit='T')*30
+ result = timedelta_range('0 days',freq='30T',periods=50)
+ tm.assert_index_equal(result, expected)
+
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
| - [x] docs
```
In [4]: pd.set_option('max_rows',10)
In [5]: df = DataFrame({'A' : np.arange(1480)},index=pd.to_timedelta(np.arange(1480),unit='T'))
In [6]: df
Out[6]:
A
0 days 00:00:00 0
0 days 00:01:00 1
0 days 00:02:00 2
0 days 00:03:00 3
0 days 00:04:00 4
... ...
1 days 00:35:00 1475
1 days 00:36:00 1476
1 days 00:37:00 1477
1 days 00:38:00 1478
1 days 00:39:00 1479
[1480 rows x 1 columns]
In [7]: df.resample('30T',how='sum')
Out[7]:
A
0 days 00:00:00 435
0 days 00:30:00 1335
0 days 01:00:00 2235
0 days 01:30:00 3135
0 days 02:00:00 4035
... ...
0 days 22:30:00 40935
0 days 23:00:00 41835
0 days 23:30:00 42735
1 days 00:00:00 43635
1 days 00:30:00 14745
[50 rows x 1 columns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/8345 | 2014-09-21T22:28:34Z | 2014-09-22T19:29:24Z | 2014-09-22T19:29:24Z | 2014-09-22T19:34:15Z |
CLN: pep8 clean up of sql.py | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 184c1a0104703..29ff08391e0e4 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1,13 +1,14 @@
+# -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
+
from __future__ import print_function, division
-from datetime import datetime, date, timedelta
+from datetime import datetime, date
import warnings
import traceback
-import itertools
import re
import numpy as np
@@ -15,12 +16,13 @@
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
-from pandas.core.common import notnull, isnull
+from pandas.core.common import isnull
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
from contextlib import contextmanager
+
class SQLAlchemyRequired(ImportError):
pass
@@ -34,6 +36,7 @@ class DatabaseError(IOError):
_SQLALCHEMY_INSTALLED = None
+
def _is_sqlalchemy_engine(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
@@ -80,7 +83,8 @@ def _handle_date_column(col, format=None):
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
- elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
+ elif (issubclass(col.dtype.type, np.floating)
+ or issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
@@ -89,8 +93,9 @@ def _handle_date_column(col, format=None):
def _parse_date_columns(data_frame, parse_dates):
- """ Force non-datetime columns to be read as such.
- Supports both string formatted and integer timestamp columns
+ """
+ Force non-datetime columns to be read as such.
+ Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
@@ -152,6 +157,7 @@ def _safe_fetch(cur):
if excName == 'OperationalError':
return []
+
def tquery(sql, con=None, cur=None, retry=True):
"""
DEPRECATED. Returns list of tuples corresponding to each row in given sql
@@ -209,8 +215,8 @@ def tquery(sql, con=None, cur=None, retry=True):
def uquery(sql, con=None, cur=None, retry=True, params=None):
"""
- DEPRECATED. Does the same thing as tquery, but instead of returning results, it
- returns the number of rows affected. Good for update queries.
+ DEPRECATED. Does the same thing as tquery, but instead of returning
+ results, it returns the number of rows affected. Good for update queries.
To obtain the same result in the future, you can use the following:
@@ -269,8 +275,8 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
con : SQLAlchemy engine
Sqlite DBAPI connection mode not supported
schema : string, default None
- Name of SQL schema in database to query (if database flavor supports this).
- If None, use default schema (default).
+ Name of SQL schema in database to query (if database flavor
+ supports this). If None, use default schema (default).
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
@@ -343,7 +349,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used
- to pass parameters is database driver dependent. Check your
+ to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
@@ -393,7 +399,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used
- to pass parameters is database driver dependent. Check your
+ to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
@@ -469,8 +475,8 @@ def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
schema : string, default None
- Name of SQL schema in database to write to (if database flavor supports
- this). If None, use default schema (default).
+ Name of SQL schema in database to write to (if database flavor
+ supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
@@ -482,7 +488,7 @@ def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
- If not None, then rows will be written in batches of this size at a
+ If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
"""
@@ -535,7 +541,9 @@ def has_table(table_name, con, flavor='sqlite', schema=None):
"and will be removed in future versions. "
"MySQL will be further supported with SQLAlchemy engines.")
-def pandasSQL_builder(con, flavor=None, schema=None, meta=None, is_cursor=False):
+
+def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
+ is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
@@ -622,7 +630,7 @@ def insert_data(self):
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
-
+
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
@@ -631,7 +639,8 @@ def insert_data(self):
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
- # convert to microsecond resolution so this yields datetime.datetime
+ # convert to microsecond resolution so this yields
+ # datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.values, dtype=object)
@@ -647,7 +656,7 @@ def insert_data(self):
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
- data = [dict( (k, v) for k, v in zip(keys, row) ) for row in data_iter]
+ data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
@@ -658,11 +667,11 @@ def insert(self, chunksize=None):
if nrows == 0:
return
- if chunksize is None:
+ if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
-
+
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
@@ -715,7 +724,8 @@ def _index_name(self, index, index_label):
else:
return index_label
# return the used column labels for the index columns
- if nlevels == 1 and 'index' not in self.frame.columns and self.frame.index.name is None:
+ if (nlevels == 1 and 'index' not in self.frame.columns
+ and self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
@@ -739,7 +749,7 @@ def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types += [
(str(self.frame.columns[i]),
- dtype_mapper(self.frame.iloc[:,i]),
+ dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
@@ -756,9 +766,8 @@ def _create_table_setup(self):
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
- columns.append(PrimaryKeyConstraint(self.keys,
- name=self.name+'_pk'))
-
+ pkc = PrimaryKeyConstraint(self.keys, name=self.name + '_pk')
+ columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
@@ -770,17 +779,16 @@ def _create_table_setup(self):
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
- """ Make a data_frame's column type align with an sql_table
- column types
- Need to work around limited NA value support.
- Floats are always fine, ints must always
- be floats if there are Null values.
- Booleans are hard because converting bool column with None replaces
- all Nones with false. Therefore only convert bool if there are no
- NA values.
- Datetimes should already be converted
- to np.datetime if supported, but here we also force conversion
- if required
+ """
+ Make the DataFrame's column types align with the SQL table
+ column types.
+ Need to work around limited NA value support. Floats are always
+ fine, ints must always be floats if there are Null values.
+ Booleans are hard because converting bool column with None replaces
+ all Nones with false. Therefore only convert bool if there are no
+ NA values.
+ Datetimes should already be converted to np.datetime64 if supported,
+ but here we also force conversion if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
@@ -823,7 +831,7 @@ def _harmonize_columns(self, parse_dates=None):
def _sqlalchemy_type(self, col):
from sqlalchemy.types import (BigInteger, Float, Text, Boolean,
- DateTime, Date, Time, Interval)
+ DateTime, Date, Time)
if com.is_datetime64_dtype(col):
try:
@@ -874,12 +882,12 @@ class PandasSQL(PandasObject):
"""
def read_sql(self, *args, **kwargs):
- raise ValueError(
- "PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor")
+ raise ValueError("PandasSQL must be created with an SQLAlchemy engine"
+ " or connection+sql flavor")
def to_sql(self, *args, **kwargs):
- raise ValueError(
- "PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor")
+ raise ValueError("PandasSQL must be created with an SQLAlchemy engine"
+ " or connection+sql flavor")
class PandasSQLAlchemy(PandasSQL):
@@ -897,7 +905,7 @@ def __init__(self, engine, schema=None, meta=None):
self.meta = meta
def run_transaction(self):
- return self.engine.begin()
+ return self.engine.begin()
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy engine"""
@@ -964,8 +972,8 @@ def drop_table(self, table_name, schema=None):
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None):
- table = PandasSQLTable(table_name, self, frame=frame, index=False,
- keys=keys)
+ table = PandasSQLTable(table_name, self, frame=frame, index=False,
+ keys=keys)
return str(table.sql_schema())
@@ -1025,9 +1033,11 @@ def _create_sql_schema(self, frame, table_name, keys=None):
class PandasSQLTableLegacy(PandasSQLTable):
- """Patch the PandasSQLTable for legacy support.
- Instead of a table variable just use the Create Table
- statement"""
+ """
+ Patch the PandasSQLTable for legacy support.
+ Instead of a table variable just use the Create Table statement.
+ """
+
def sql_schema(self):
return str(";\n".join(self.table))
@@ -1058,11 +1068,11 @@ def _execute_insert(self, conn, keys, data_iter):
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
- """Return a list of SQL statement that create a table reflecting the
+ """
+ Return a list of SQL statement that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
-
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
@@ -1159,15 +1169,15 @@ def execute(self, *args, **kwargs):
else:
cur.execute(*args)
return cur
- except Exception as e:
+ except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
- ex = DatabaseError(
- "Execution failed on sql: %s\n%s\nunable to rollback" % (args[0], e))
+ ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
+ " to rollback" % (args[0], exc))
raise_with_traceback(ex)
- ex = DatabaseError("Execution failed on sql '%s': %s" % (args[0], e))
+ ex = DatabaseError("Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
def read_sql(self, sql, index_col=None, coerce_float=True, params=None,
@@ -1213,11 +1223,11 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
- Ignored parameter included for compatability with SQLAlchemy version
- of `to_sql`.
+ Ignored parameter included for compatability with SQLAlchemy
+ version of ``to_sql``.
chunksize : int, default None
- If not None, then rows will be written in batches of this size at a
- time. If None, all rows will be written at once.
+ If not None, then rows will be written in batches of this
+ size at a time. If None, all rows will be written at once.
"""
table = PandasSQLTableLegacy(
@@ -1243,8 +1253,8 @@ def drop_table(self, name, schema=None):
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None):
- table = PandasSQLTableLegacy(table_name, self, frame=frame, index=False,
- keys=keys)
+ table = PandasSQLTableLegacy(table_name, self, frame=frame,
+ index=False, keys=keys)
return str(table.sql_schema())
| https://api.github.com/repos/pandas-dev/pandas/pulls/8340 | 2014-09-21T15:59:08Z | 2014-09-21T17:25:41Z | 2014-09-21T17:25:41Z | 2014-09-21T23:29:18Z | |
PERF: add copy=True argument to pd.concat to enable pass-thru concats with complete blocks (GH8252) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 3385696255512..c66cda58fa8a0 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -276,6 +276,7 @@ API changes
Index(['a','b','c']).difference(Index(['b','c','d']))
- ``DataFrame.info()`` now ends its output with a newline character (:issue:`8114`)
+- add ``copy=True`` argument to ``pd.concat`` to enable pass thrue of complete blocks (:issue:`8252`)
.. _whatsnew_0150.dt:
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index dc7f9893f24d3..7b7446a86dd0b 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -666,7 +666,7 @@ def _sort_labels(uniques, left, right):
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
- keys=None, levels=None, names=None, verify_integrity=False):
+ keys=None, levels=None, names=None, verify_integrity=False, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes. Can also add a layer of hierarchical indexing on the
@@ -704,6 +704,8 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the the index values on the other
axes are still respected in the join.
+ copy : boolean, default True
+ If False, do not copy data unnecessarily
Notes
-----
@@ -716,7 +718,8 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
- verify_integrity=verify_integrity)
+ verify_integrity=verify_integrity,
+ copy=copy)
return op.get_result()
@@ -727,7 +730,7 @@ class _Concatenator(object):
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
- ignore_index=False, verify_integrity=False):
+ ignore_index=False, verify_integrity=False, copy=True):
if not isinstance(objs, (list,tuple,types.GeneratorType,dict,TextFileReader)):
raise TypeError('first argument must be a list-like of pandas '
'objects, you passed an object of type '
@@ -846,6 +849,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
+ self.copy = copy
self.new_axes = self._get_new_axes()
@@ -879,7 +883,9 @@ def get_result(self):
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
- mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=True)
+ mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy)
+ if not self.copy:
+ new_data._consolidate_inplace()
return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat')
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 69b9436a5769b..89ff07bb7fa4c 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1485,6 +1485,38 @@ def test_append_missing_column_proper_upcast(self):
self.assertEqual(appended['A'].dtype, 'f8')
self.assertEqual(appended['B'].dtype, 'O')
+ def test_concat_copy(self):
+
+ df = DataFrame(np.random.randn(4, 3))
+ df2 = DataFrame(np.random.randint(0,10,size=4).reshape(4,1))
+ df3 = DataFrame({5 : 'foo'},index=range(4))
+
+ # these are actual copies
+ result = concat([df,df2,df3],axis=1,copy=True)
+ for b in result._data.blocks:
+ self.assertIsNone(b.values.base)
+
+ # these are the same
+ result = concat([df,df2,df3],axis=1,copy=False)
+ for b in result._data.blocks:
+ if b.is_float:
+ self.assertTrue(b.values.base is df._data.blocks[0].values.base)
+ elif b.is_integer:
+ self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
+ elif b.is_object:
+ self.assertIsNotNone(b.values.base)
+
+ # float block was consolidated
+ df4 = DataFrame(np.random.randn(4,1))
+ result = concat([df,df2,df3,df4],axis=1,copy=False)
+ for b in result._data.blocks:
+ if b.is_float:
+ self.assertIsNone(b.values.base)
+ elif b.is_integer:
+ self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
+ elif b.is_object:
+ self.assertIsNotNone(b.values.base)
+
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
| closes #8252
| https://api.github.com/repos/pandas-dev/pandas/pulls/8331 | 2014-09-20T16:00:41Z | 2014-09-20T16:50:06Z | 2014-09-20T16:50:06Z | 2014-09-20T16:50:06Z |
ENH: add chunksize argument to read_sql (GH2908) | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 1cd1f9f5bf10f..5490e666904f9 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3411,6 +3411,18 @@ Of course, you can specify a more "complex" query.
pd.read_sql_query("SELECT id, Col_1, Col_2 FROM data WHERE id = 42;", engine)
+The func:`~pandas.read_sql_query` function supports a ``chunksize`` argument.
+Specifying this will return an iterator through chunks of the query result:
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.random.randn(20, 3), columns=list('abc'))
+ df.to_sql('data_chunks', engine, index=False)
+
+.. ipython:: python
+
+ for chunk in pd.read_sql_query("SELECT * FROM data_chunks", engine, chunksize):
+ print(chunk)
You can also run a plain query without creating a dataframe with
:func:`~pandas.io.sql.execute`. This is useful for queries that don't return values,
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index cec3148a1f9fa..3cb7b7d5e8b69 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -801,7 +801,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
-- Added support for a ``chunksize`` parameter to ``to_sql`` function. This allows DataFrame to be written in chunks and avoid packet-size overflow errors (:issue:`8062`)
+- Added support for a ``chunksize`` parameter to ``to_sql`` function. This allows DataFrame to be written in chunks and avoid packet-size overflow errors (:issue:`8062`).
+- Added support for a ``chunksize`` parameter to ``read_sql`` function. Specifying this argument will return an iterator through chunks of the query result (:issue:`2908`).
- Added support for writing ``datetime.date`` and ``datetime.time`` object columns with ``to_sql`` (:issue:`6932`).
- Added support for specifying a ``schema`` to read from/write to with ``read_sql_table`` and ``to_sql`` (:issue:`7441`, :issue:`7952`).
For example:
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 53b664458527a..09acfcaee976b 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -32,7 +32,7 @@ class DatabaseError(IOError):
#------------------------------------------------------------------------------
-# Helper functions
+#--- Helper functions
_SQLALCHEMY_INSTALLED = None
@@ -115,6 +115,21 @@ def _parse_date_columns(data_frame, parse_dates):
return data_frame
+def _wrap_result(data, columns, index_col=None, coerce_float=True,
+ parse_dates=None):
+ """Wrap result set of query in a DataFrame """
+
+ frame = DataFrame.from_records(data, columns=columns,
+ coerce_float=coerce_float)
+
+ _parse_date_columns(frame, parse_dates)
+
+ if index_col is not None:
+ frame.set_index(index_col, inplace=True)
+
+ return frame
+
+
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
@@ -262,7 +277,8 @@ def uquery(sql, con=None, cur=None, retry=True, params=None):
#--- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
- coerce_float=True, parse_dates=None, columns=None):
+ coerce_float=True, parse_dates=None, columns=None,
+ chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy engine, returns a DataFrame.
@@ -293,6 +309,9 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
such as SQLite
columns : list
List of column names to select from sql table
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number of
+ rows to include in each chunk.
Returns
-------
@@ -318,7 +337,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
- parse_dates=parse_dates, columns=columns)
+ parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
@@ -327,7 +346,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
- parse_dates=None):
+ parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
@@ -362,6 +381,9 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number of
+ rows to include in each chunk.
Returns
-------
@@ -376,11 +398,11 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
- parse_dates=parse_dates)
+ parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
- parse_dates=None, columns=None):
+ parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
@@ -415,6 +437,9 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
columns : list
List of column names to select from sql table (only used when reading
a table).
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the
+ number of rows to include in each chunk.
Returns
-------
@@ -438,7 +463,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
- coerce_float=coerce_float, parse_dates=parse_dates)
+ coerce_float=coerce_float, parse_dates=parse_dates,
+ chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
@@ -449,11 +475,12 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
- parse_dates=parse_dates, columns=columns)
+ parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
- coerce_float=coerce_float, parse_dates=parse_dates)
+ coerce_float=coerce_float, parse_dates=parse_dates,
+ chunksize=chunksize)
def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
@@ -684,7 +711,27 @@ def insert(self, chunksize=None):
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
- def read(self, coerce_float=True, parse_dates=None, columns=None):
+ def _query_iterator(self, result, chunksize, columns, coerce_float=True,
+ parse_dates=None):
+ """Return generator through chunked result set"""
+
+ while True:
+ data = result.fetchmany(chunksize)
+ if not data:
+ break
+ else:
+ self.frame = DataFrame.from_records(
+ data, columns=columns, coerce_float=coerce_float)
+
+ self._harmonize_columns(parse_dates=parse_dates)
+
+ if self.index is not None:
+ self.frame.set_index(self.index, inplace=True)
+
+ yield self.frame
+
+ def read(self, coerce_float=True, parse_dates=None, columns=None,
+ chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
@@ -696,18 +743,23 @@ def read(self, coerce_float=True, parse_dates=None, columns=None):
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
- data = result.fetchall()
column_names = result.keys()
- self.frame = DataFrame.from_records(
- data, columns=column_names, coerce_float=coerce_float)
+ if chunksize is not None:
+ return self._query_iterator(result, chunksize, column_names,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+ else:
+ data = result.fetchall()
+ self.frame = DataFrame.from_records(
+ data, columns=column_names, coerce_float=coerce_float)
- self._harmonize_columns(parse_dates=parse_dates)
+ self._harmonize_columns(parse_dates=parse_dates)
- if self.index is not None:
- self.frame.set_index(self.index, inplace=True)
+ if self.index is not None:
+ self.frame.set_index(self.index, inplace=True)
- return self.frame
+ return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
@@ -898,8 +950,8 @@ class SQLDatabase(PandasSQL):
Parameters
----------
engine : SQLAlchemy engine
- Engine to connect with the database. Using SQLAlchemy makes it possible to use any DB supported by that
- library.
+ Engine to connect with the database. Using SQLAlchemy makes it
+ possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
@@ -926,9 +978,10 @@ def execute(self, *args, **kwargs):
return self.engine.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
- parse_dates=None, columns=None, schema=None):
+ parse_dates=None, columns=None, schema=None,
+ chunksize=None):
"""Read SQL database table into a DataFrame.
-
+
Parameters
----------
table_name : string
@@ -936,15 +989,16 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
- Attempt to convert values to non-string, non-numeric objects (like
- decimal.Decimal) to floating point. Can result in loss of Precision.
+ Attempt to convert values to non-string, non-numeric objects
+ (like decimal.Decimal) to floating point. This can result in
+ loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
- to the keyword arguments of :func:`pandas.to_datetime`
+ - Dict of ``{column_name: arg}``, where the arg corresponds
+ to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
@@ -953,6 +1007,9 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number
+ of rows to include in each chunk.
Returns
-------
@@ -966,10 +1023,25 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
- parse_dates=parse_dates, columns=columns)
-
+ parse_dates=parse_dates, columns=columns,
+ chunksize=chunksize)
+
+ @staticmethod
+ def _query_iterator(result, chunksize, columns, index_col=None,
+ coerce_float=True, parse_dates=None):
+ """Return generator through chunked result set"""
+
+ while True:
+ data = result.fetchmany(chunksize)
+ if not data:
+ break
+ else:
+ yield _wrap_result(data, columns, index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+
def read_query(self, sql, index_col=None, coerce_float=True,
- parse_dates=None, params=None):
+ parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
@@ -1006,30 +1078,31 @@ def read_query(self, sql, index_col=None, coerce_float=True,
read_sql_table : Read SQL database table into a DataFrame
read_sql
- """
+ """
args = _convert_params(sql, params)
result = self.execute(*args)
- data = result.fetchall()
columns = result.keys()
- data_frame = DataFrame.from_records(
- data, columns=columns, coerce_float=coerce_float)
-
- _parse_date_columns(data_frame, parse_dates)
-
- if index_col is not None:
- data_frame.set_index(index_col, inplace=True)
+ if chunksize is not None:
+ return self._query_iterator(result, chunksize, columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+ else:
+ data = result.fetchall()
+ frame = _wrap_result(data, columns, index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+ return frame
- return data_frame
-
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
-
+
Parameters
----------
frame : DataFrame
@@ -1308,23 +1381,42 @@ def execute(self, *args, **kwargs):
ex = DatabaseError("Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
+ @staticmethod
+ def _query_iterator(cursor, chunksize, columns, index_col=None,
+ coerce_float=True, parse_dates=None):
+ """Return generator through chunked result set"""
+
+ while True:
+ data = cursor.fetchmany(chunksize)
+ if not data:
+ cursor.close()
+ break
+ else:
+ yield _wrap_result(data, columns, index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
- parse_dates=None):
+ parse_dates=None, chunksize=None):
+
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
- data = self._fetchall_as_list(cursor)
- cursor.close()
- data_frame = DataFrame.from_records(
- data, columns=columns, coerce_float=coerce_float)
+ if chunksize is not None:
+ return self._query_iterator(cursor, chunksize, columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+ else:
+ data = self._fetchall_as_list(cursor)
+ cursor.close()
- _parse_date_columns(data_frame, parse_dates)
+ frame = _wrap_result(data, columns, index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates)
+ return frame
- if index_col is not None:
- data_frame.set_index(index_col, inplace=True)
- return data_frame
-
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index e116a14fa9625..2099a8d0de82e 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -379,6 +379,7 @@ class _TestSQLApi(PandasSQLTest):
"""
flavor = 'sqlite'
+ mode = None
def setUp(self):
self.conn = self.connect()
@@ -643,6 +644,40 @@ def test_get_schema(self):
con=self.conn)
self.assertTrue('CREATE' in create_sql)
+ def test_chunksize_read(self):
+ df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
+ df.to_sql('test_chunksize', self.conn, index=False)
+
+ # reading the query in one time
+ res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
+
+ # reading the query in chunks with read_sql_query
+ res2 = DataFrame()
+ i = 0
+ sizes = [5, 5, 5, 5, 2]
+
+ for chunk in sql.read_sql_query("select * from test_chunksize",
+ self.conn, chunksize=5):
+ res2 = concat([res2, chunk], ignore_index=True)
+ self.assertEqual(len(chunk), sizes[i])
+ i += 1
+
+ tm.assert_frame_equal(res1, res2)
+
+ # reading the query in chunks with read_sql_query
+ if self.mode == 'sqlalchemy':
+ res3 = DataFrame()
+ i = 0
+ sizes = [5, 5, 5, 5, 2]
+
+ for chunk in sql.read_sql_table("test_chunksize", self.conn,
+ chunksize=5):
+ res3 = concat([res3, chunk], ignore_index=True)
+ self.assertEqual(len(chunk), sizes[i])
+ i += 1
+
+ tm.assert_frame_equal(res1, res3)
+
class TestSQLApi(_TestSQLApi):
"""
@@ -653,6 +688,7 @@ class TestSQLApi(_TestSQLApi):
"""
flavor = 'sqlite'
+ mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
@@ -742,6 +778,7 @@ class TestSQLiteFallbackApi(_TestSQLApi):
"""
flavor = 'sqlite'
+ mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
| Closes #2908
Should probably add this also to `read_sql_table`. And try to reduce some repetitions (`_wrap_result` can maybe be a top-level (in the file) helper function).
| https://api.github.com/repos/pandas-dev/pandas/pulls/8330 | 2014-09-20T15:23:47Z | 2014-10-07T00:15:24Z | 2014-10-07T00:15:24Z | 2014-10-07T00:15:24Z |
BUG: Bug in casting when setting a column in a same-dtype block (GH7704) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 627d8f7bd40be..a4403699c9045 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -912,7 +912,7 @@ Bug Fixes
a custom line terminator or ``delim_whitespace=True`` (:issue:`8122`).
- Bug in ``read_html`` where empty tables caused a ``StopIteration`` (:issue:`7575`)
-
+- Bug in casting when setting a column in a same-dtype block (:issue:`7704`)
- Bug in accessing groups from a ``GroupBy`` when the original grouper
was a tuple (:issue:`8121`).
- Bug in ``.at`` that would accept integer indexers on a non-integer index and do fallback (:issue:`7814`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 2877c2b10c92f..053b92b2ad547 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -556,9 +556,15 @@ def setitem(self, indexer, value):
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
- return [make_block(transf(values),
+ block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
- fastpath=True)]
+ fastpath=True)
+
+ # may have to soft convert_objects here
+ if block.is_object and not self.is_object:
+ block = block.convert(convert_numeric=False)
+
+ return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index dd6cf773dcdb7..b4b8e4263ec78 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -534,6 +534,15 @@ def test_setitem_cast(self):
self.frame['something'] = 2.5
self.assertEqual(self.frame['something'].dtype, np.float64)
+ # GH 7704
+ # dtype conversion on setting
+ df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
+ df['event'] = np.nan
+ df.loc[10,'event'] = 'foo'
+ result = df.get_dtype_counts().order()
+ expected = Series({'float64' : 3, 'object' : 1 }).order()
+ assert_series_equal(result, expected)
+
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
| closes #7704
| https://api.github.com/repos/pandas-dev/pandas/pulls/8322 | 2014-09-19T17:04:28Z | 2014-09-19T20:05:55Z | 2014-09-19T20:05:55Z | 2014-09-19T20:05:55Z |
BUG: Bug in .at that would accept integer indexers on a non-integer index and do fallback (GH7814) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 74cffa7859a1d..f748facf795de 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -909,7 +909,7 @@ Bug Fixes
- Bug in accessing groups from a ``GroupBy`` when the original grouper
was a tuple (:issue:`8121`).
-
+- Bug in ``.at`` that would accept integer indexers on a non-integer index and do fallback (:issue:`7814`)
- Bug with kde plot and NaNs (:issue:`8182`)
- Bug in ``GroupBy.count`` with float32 data type were nan values were not excluded (:issue:`8169`).
- Bug with stacked barplots and NaNs (:issue:`8175`).
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 15bf8e8807836..a99d056419ad2 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1510,6 +1510,18 @@ class _AtIndexer(_ScalarAccessIndexer):
""" label based scalar accessor """
_takeable = False
+ def _convert_key(self, key):
+ """ require they keys to be the same type as the index (so we don't fallback) """
+ for ax, i in zip(self.obj.axes, key):
+ if ax.is_integer():
+ if not com.is_integer(i):
+ raise ValueError("At based indexing on an integer index can only have integer "
+ "indexers")
+ else:
+ if com.is_integer(i):
+ raise ValueError("At based indexing on an non-integer index can only have non-integer "
+ "indexers")
+ return key
class _iAtIndexer(_ScalarAccessIndexer):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 7f2907761990a..27a6b844bccb5 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -866,6 +866,28 @@ def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
+ # at should not fallback
+ # GH 7814
+ s = Series([1,2,3], index=list('abc'))
+ result = s.at['a']
+ self.assertEquals(result, 1)
+ self.assertRaises(ValueError, lambda : s.at[0])
+
+ df = DataFrame({'A' : [1,2,3]},index=list('abc'))
+ result = df.at['a','A']
+ self.assertEquals(result, 1)
+ self.assertRaises(ValueError, lambda : df.at['a',0])
+
+ s = Series([1,2,3], index=[3,2,1])
+ result = s.at[1]
+ self.assertEquals(result, 3)
+ self.assertRaises(ValueError, lambda : s.at['a'])
+
+ df = DataFrame({0 : [1,2,3]},index=[3,2,1])
+ result = df.at[1,0]
+ self.assertEquals(result, 3)
+ self.assertRaises(ValueError, lambda : df.at['a',0])
+
def test_loc_getitem_label_slice(self):
# label slices (with ints)
| closes #7814
| https://api.github.com/repos/pandas-dev/pandas/pulls/8320 | 2014-09-19T15:04:14Z | 2014-09-19T15:40:59Z | 2014-09-19T15:40:58Z | 2014-09-19T15:40:59Z |
BUG: Exception raised when `to_sql` used to insert empty dataframe | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 6ead268b32cd6..184c1a0104703 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -654,8 +654,15 @@ def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
+
+ if nrows == 0:
+ return
+
if chunksize is None:
chunksize = nrows
+ elif chunksize == 0:
+ raise ValueError('chunksize argument should be non-zero')
+
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index f02c701d97bcf..217114a00e980 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -253,6 +253,10 @@ def _to_sql(self):
# Nuke table
self.drop_table('test_frame1')
+ def _to_sql_empty(self):
+ self.drop_table('test_frame1')
+ self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
+
def _to_sql_fail(self):
self.drop_table('test_frame1')
@@ -850,6 +854,9 @@ def test_read_sql_named_parameter(self):
def test_to_sql(self):
self._to_sql()
+ def test_to_sql_empty(self):
+ self._to_sql_empty()
+
def test_to_sql_fail(self):
self._to_sql_fail()
@@ -1346,6 +1353,9 @@ def test_read_sql_named_parameter(self):
def test_to_sql(self):
self._to_sql()
+ def test_to_sql_empty(self):
+ self._to_sql_empty()
+
def test_to_sql_fail(self):
self._to_sql_fail()
| Right now, if inserting an empty table (no rows) using `to_sql`, the following exception is raised:
```
...
File "/Users/artemy/dev/pandas/pandas/io/sql.py", line 1220, in to_sql
table.insert(chunksize)
File "/Users/artemy/dev/pandas/pandas/io/sql.py", line 659, in insert
chunks = int(nrows / chunksize) + 1
ZeroDivisionError: division by zero
```
This fixes this bug and adds tests for it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8316 | 2014-09-19T00:13:00Z | 2014-09-19T09:21:07Z | 2014-09-19T09:21:07Z | 2014-09-19T09:21:07Z |
VIS: Hide labels for NaN/zeros in boxplt [WIP] | diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index fffb4f279878f..bcdac274d7282 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -628,6 +628,13 @@ def test_pie_series(self):
ax = _check_plot_works(series.plot, kind='pie')
self._check_text_labels(ax.texts, series.index)
+ def test_pie_nan(self):
+ s = Series([1, np.nan, 1, 1])
+ ax = s.plot(kind='pie', legend=True)
+ expected = ['0', '', '2', '3']
+ result = [x.get_text() for x in ax.texts]
+ self.assertEqual(result, expected)
+
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
@@ -2717,6 +2724,22 @@ def test_pie_df(self):
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
+ def test_pie_df_nan(self):
+ df = DataFrame(np.random.rand(4, 4))
+ for i in range(4):
+ df.iloc[i, i] = np.nan
+ fig, axes = self.plt.subplots(ncols=4)
+ df.plot(kind='pie', subplots=True, ax=axes, legend=True)
+
+ base_expected = ['0', '1', '2', '3']
+ for i, ax in enumerate(axes):
+ expected = list(base_expected) # copy
+ expected[i] = ''
+ result = [x.get_text() for x in ax.texts]
+ self.assertEqual(result, expected)
+ # legend labels
+ self.assertEqual([x.get_text() for x in ax.get_legend().get_texts()],
+ base_expected)
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 0c150074a9298..5e11b459d20d8 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2033,10 +2033,20 @@ def _make_plot(self):
kwds = self.kwds.copy()
+ def blank_labeler(label, value):
+ if value == 0:
+ return ''
+ else:
+ return label
+
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
- results = ax.pie(y, labels=labels, **kwds)
+ # Blank out labels for values of 0 so they don't overlap
+ # with nonzero wedges
+ blabels = [blank_labeler(label, value) for
+ label, value in zip(labels, y)]
+ results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
| Closes: https://github.com/pydata/pandas/issues/8198
The idea is to hide the labels around the pie plot so they don't overlap w/ nonzero labels. The NaN/zero labels should still be visible in the legend to indicate that there's missing data. There's a picture of the problem in the linked issue.
NaNs in the top row of plots:
```
In [1]: df = pd.DataFrame(np.random.rand(4, 4))
In [2]: df2 = df.copy()
In [3]: for i in range(len(df)):
...: df.iloc[i, i] = np.nan
...:
In [4]: fig, axes = plt.subplots(figsize=(16, 8), nrows=2, ncols=4)
In [5]: df.plot(kind='pie', subplots=True, ax=axes.ravel()[:4])
In [6]: df2.plot(kind='pie', subplots=True, ax=axes.ravel()[4:])
```

Right now, for **DataFrames only** the NaN/zero labels aren't being put in the legend (or they aren't showing up). Still need to figure out why. It works fine for Series:
```
In [8]: df[0].plot(kind='pie', figsize=(4,4), legend=True)
```

| https://api.github.com/repos/pandas-dev/pandas/pulls/8307 | 2014-09-18T13:14:37Z | 2014-09-25T16:55:01Z | 2014-09-25T16:55:01Z | 2016-11-03T12:38:15Z |
VIS: default LinePlot rotation of 0 | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 32892a431cd29..e1c64e641e1e7 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1118,6 +1118,7 @@ or columns needed, given the other.
The above example is identical to using
.. ipython:: python
+
df.plot(subplots=True, layout=(-1, 3), figsize=(6, 6));
The required number of rows (2) is inferred from the number of series to plot
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 1e8cf4d700f39..92bd0c7c0893d 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -573,6 +573,10 @@ def test_bar_ignore_index(self):
def test_rotation(self):
df = DataFrame(randn(5, 5))
+ # Default rot 0
+ axes = df.plot()
+ self._check_ticks_props(axes, xrot=0)
+
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
@@ -974,7 +978,7 @@ def test_plot(self):
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
- self._check_ticks_props(ax, xrot=30)
+ self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
@@ -1178,7 +1182,7 @@ def test_subplots_timeseries(self):
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
- self._check_ticks_props(axes, xrot=30)
+ self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index f8d7a16e686b7..cfba295d2816e 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -799,7 +799,11 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
if rot is not None:
self.rot = rot
+ # need to know for format_date_labels since it's rotated to 30 by
+ # default
+ self._rot_set = True
else:
+ self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
@@ -1498,7 +1502,7 @@ def _post_plot_logic(self):
class LinePlot(MPLPlot):
- _default_rot = 30
+ _default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
@@ -1679,6 +1683,10 @@ def _post_plot_logic(self):
for ax in self.axes:
if condition:
+ # irregular TS rotated 30 deg. by default
+ # probably a better place to check / set this.
+ if not self._rot_set:
+ self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None:
| Closes https://github.com/pydata/pandas/issues/8150
``` python
In [1]: s = pd.Series([1,2,3])
In [2]: s.plot()
Out[2]: <matplotlib.axes._subplots.AxesSubplot at 0x10da02d68>
```

No release notes since this is just changing it back to how it was for the last release.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8301 | 2014-09-17T20:41:41Z | 2014-09-19T17:04:46Z | 2014-09-19T17:04:46Z | 2016-11-03T12:38:20Z |
TST: suppress gratuitous warnings in test_moments.py | diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 94c2521ff6938..9ea67e36b094f 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -1,6 +1,7 @@
import nose
import sys
import functools
+import warnings
from datetime import datetime
from numpy.random import randn
@@ -39,6 +40,8 @@ def setUp(self):
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
+ warnings.simplefilter("ignore", category=FutureWarning)
+
def test_centered_axis_validation(self):
# ok
mom.rolling_mean(Series(np.ones(10)),3,center=True ,axis=0)
@@ -234,17 +237,20 @@ def alt(x):
self._check_moment_func(f, alt)
def test_rolling_apply(self):
- ser = Series([])
- assert_series_equal(
- ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))
+ # suppress warnings about empty slices, as we are deliberately testing with a 0-length Series
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
- def roll_mean(x, window, min_periods=None, freq=None, center=False):
- return mom.rolling_apply(x, window,
- lambda x: x[np.isfinite(x)].mean(),
- min_periods=min_periods,
- freq=freq,
- center=center)
- self._check_moment_func(roll_mean, np.mean)
+ ser = Series([])
+ assert_series_equal(ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))
+
+ def roll_mean(x, window, min_periods=None, freq=None, center=False):
+ return mom.rolling_apply(x, window,
+ lambda x: x[np.isfinite(x)].mean(),
+ min_periods=min_periods,
+ freq=freq,
+ center=center)
+ self._check_moment_func(roll_mean, np.mean)
# GH 8080
s = Series([None, None, None])
@@ -920,55 +926,59 @@ def test_expanding_consistency(self):
(mom.expanding_median, np.median, None),
]
- for min_periods in [0, 1, 2, 3, 4]:
+ # suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
+
+ for min_periods in [0, 1, 2, 3, 4]:
+
+ # test consistency between different expanding_* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=mom.expanding_count,
+ mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
+ mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
+ corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
+ var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
+ std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
+ cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
+ var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
+ std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
+ cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
+ var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
+ )
+
+ # test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
+ # or (b) expanding_apply of np.nanxyz()
+ for x in self._test_data():
+ assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
+ functions = base_functions
+ # GH 8269
+ if x.notnull().all().all():
+ functions = base_functions + no_nan_functions
+ for (expanding_f, f, require_min_periods) in functions:
+ if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
+ continue
- # test consistency between different expanding_* moments
- self._test_moments_consistency(
- min_periods=min_periods,
- count=mom.expanding_count,
- mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
- mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
- corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
- var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
- std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
- cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
- var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
- std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
- cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
- var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
- )
-
- # test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
- # or (b) expanding_apply of np.nanxyz()
- for x in self._test_data():
- assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
- functions = base_functions
- # GH 8269
- if x.notnull().all().all():
- functions = base_functions + no_nan_functions
- for (expanding_f, f, require_min_periods) in functions:
- if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
- continue
-
- if expanding_f is mom.expanding_count:
- expanding_f_result = expanding_f(x)
- expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
- else:
- if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
- expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
+ if expanding_f is mom.expanding_count:
+ expanding_f_result = expanding_f(x)
+ expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
else:
- expanding_f_result = expanding_f(x, min_periods=min_periods)
- expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
- assert_equal(expanding_f_result, expanding_apply_f_result)
-
- if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
- # test pairwise=True
- expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
- expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
- for i, _ in enumerate(x.columns):
- for j, _ in enumerate(x.columns):
- expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
- assert_panel_equal(expanding_f_result, expected)
+ if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
+ expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
+ else:
+ expanding_f_result = expanding_f(x, min_periods=min_periods)
+ expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
+ assert_equal(expanding_f_result, expanding_apply_f_result)
+
+ if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
+ # test pairwise=True
+ expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
+ expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
+ for i, _ in enumerate(x.columns):
+ for j, _ in enumerate(x.columns):
+ expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
+ assert_panel_equal(expanding_f_result, expected)
@slow
def test_rolling_consistency(self):
@@ -1512,102 +1522,106 @@ def test_pairwise_stats_column_names_order(self):
df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X'])
s = Series([1,1,3,8])
- # DataFrame methods (which do not call _flex_binary_moment())
- for f in [lambda x: x.cov(),
- lambda x: x.corr(),
- ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- assert_index_equal(result.index, df.columns)
- assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
-
- # DataFrame with itself, pairwise=True
- for f in [lambda x: mom.expanding_cov(x, pairwise=True),
- lambda x: mom.expanding_corr(x, pairwise=True),
- lambda x: mom.rolling_cov(x, window=3, pairwise=True),
- lambda x: mom.rolling_corr(x, window=3, pairwise=True),
- lambda x: mom.ewmcov(x, com=3, pairwise=True),
- lambda x: mom.ewmcorr(x, com=3, pairwise=True),
- ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- assert_index_equal(result.items, df.index)
- assert_index_equal(result.major_axis, df.columns)
- assert_index_equal(result.minor_axis, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
-
- # DataFrame with itself, pairwise=False
- for f in [lambda x: mom.expanding_cov(x, pairwise=False),
- lambda x: mom.expanding_corr(x, pairwise=False),
- lambda x: mom.rolling_cov(x, window=3, pairwise=False),
- lambda x: mom.rolling_corr(x, window=3, pairwise=False),
- lambda x: mom.ewmcov(x, com=3, pairwise=False),
- lambda x: mom.ewmcorr(x, com=3, pairwise=False),
- ]:
- results = [f(df) for df in df1s]
- for (df, result) in zip(df1s, results):
- assert_index_equal(result.index, df.index)
- assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
-
- # DataFrame with another DataFrame, pairwise=True
- for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
- lambda x, y: mom.expanding_corr(x, y, pairwise=True),
- lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
- lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
- lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
- lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
- ]:
- results = [f(df, df2) for df in df1s]
- for (df, result) in zip(df1s, results):
- assert_index_equal(result.items, df.index)
- assert_index_equal(result.major_axis, df.columns)
- assert_index_equal(result.minor_axis, df2.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
-
- # DataFrame with another DataFrame, pairwise=False
- for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
- lambda x, y: mom.expanding_corr(x, y, pairwise=False),
- lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
- lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
- lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
- lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
- ]:
- results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
- for (df, result) in zip(df1s, results):
- if result is not None:
- expected_index = df.index.union(df2.index)
- expected_columns = df.columns.union(df2.columns)
- assert_index_equal(result.index, expected_index)
- assert_index_equal(result.columns, expected_columns)
- else:
- tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
- tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
-
- # DataFrame with a Series
- for f in [lambda x, y: mom.expanding_cov(x, y),
- lambda x, y: mom.expanding_corr(x, y),
- lambda x, y: mom.rolling_cov(x, y, window=3),
- lambda x, y: mom.rolling_corr(x, y, window=3),
- lambda x, y: mom.ewmcov(x, y, com=3),
- lambda x, y: mom.ewmcorr(x, y, com=3),
- ]:
- results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
- for (df, result) in zip(df1s, results):
- assert_index_equal(result.index, df.index)
- assert_index_equal(result.columns, df.columns)
- for i, result in enumerate(results):
- if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
+ # suppress warnings about incomparable objects, as we are deliberately testing with such column labels
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message=".*incomparable objects.*", category=RuntimeWarning)
+
+ # DataFrame methods (which do not call _flex_binary_moment())
+ for f in [lambda x: x.cov(),
+ lambda x: x.corr(),
+ ]:
+ results = [f(df) for df in df1s]
+ for (df, result) in zip(df1s, results):
+ assert_index_equal(result.index, df.columns)
+ assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.assert_numpy_array_equivalent(result, results[0])
+
+ # DataFrame with itself, pairwise=True
+ for f in [lambda x: mom.expanding_cov(x, pairwise=True),
+ lambda x: mom.expanding_corr(x, pairwise=True),
+ lambda x: mom.rolling_cov(x, window=3, pairwise=True),
+ lambda x: mom.rolling_corr(x, window=3, pairwise=True),
+ lambda x: mom.ewmcov(x, com=3, pairwise=True),
+ lambda x: mom.ewmcorr(x, com=3, pairwise=True),
+ ]:
+ results = [f(df) for df in df1s]
+ for (df, result) in zip(df1s, results):
+ assert_index_equal(result.items, df.index)
+ assert_index_equal(result.major_axis, df.columns)
+ assert_index_equal(result.minor_axis, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.assert_numpy_array_equivalent(result, results[0])
+
+ # DataFrame with itself, pairwise=False
+ for f in [lambda x: mom.expanding_cov(x, pairwise=False),
+ lambda x: mom.expanding_corr(x, pairwise=False),
+ lambda x: mom.rolling_cov(x, window=3, pairwise=False),
+ lambda x: mom.rolling_corr(x, window=3, pairwise=False),
+ lambda x: mom.ewmcov(x, com=3, pairwise=False),
+ lambda x: mom.ewmcorr(x, com=3, pairwise=False),
+ ]:
+ results = [f(df) for df in df1s]
+ for (df, result) in zip(df1s, results):
+ assert_index_equal(result.index, df.index)
+ assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.assert_numpy_array_equivalent(result, results[0])
+
+ # DataFrame with another DataFrame, pairwise=True
+ for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
+ lambda x, y: mom.expanding_corr(x, y, pairwise=True),
+ lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
+ lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
+ lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
+ lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
+ ]:
+ results = [f(df, df2) for df in df1s]
+ for (df, result) in zip(df1s, results):
+ assert_index_equal(result.items, df.index)
+ assert_index_equal(result.major_axis, df.columns)
+ assert_index_equal(result.minor_axis, df2.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.assert_numpy_array_equivalent(result, results[0])
+
+ # DataFrame with another DataFrame, pairwise=False
+ for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
+ lambda x, y: mom.expanding_corr(x, y, pairwise=False),
+ lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
+ lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
+ lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
+ lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
+ ]:
+ results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
+ for (df, result) in zip(df1s, results):
+ if result is not None:
+ expected_index = df.index.union(df2.index)
+ expected_columns = df.columns.union(df2.columns)
+ assert_index_equal(result.index, expected_index)
+ assert_index_equal(result.columns, expected_columns)
+ else:
+ tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
+ tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
+
+ # DataFrame with a Series
+ for f in [lambda x, y: mom.expanding_cov(x, y),
+ lambda x, y: mom.expanding_corr(x, y),
+ lambda x, y: mom.rolling_cov(x, y, window=3),
+ lambda x, y: mom.rolling_corr(x, y, window=3),
+ lambda x, y: mom.ewmcov(x, y, com=3),
+ lambda x, y: mom.ewmcorr(x, y, com=3),
+ ]:
+ results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
+ for (df, result) in zip(df1s, results):
+ assert_index_equal(result.index, df.index)
+ assert_index_equal(result.columns, df.columns)
+ for i, result in enumerate(results):
+ if i > 0:
+ self.assert_numpy_array_equivalent(result, results[0])
def test_rolling_skew_edge_cases(self):
| This PR suppresses expected warnings in test_moments.py, which are otherwise distracting.
Note that the changes are not nearly as extensive as they look. I simply indented blocks of code under newly added `with warnings.catch_warnings():`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8299 | 2014-09-17T19:42:01Z | 2014-09-23T15:35:39Z | 2014-09-23T15:35:39Z | 2014-09-23T15:39:55Z |
BUG: Bug in iat return boxing for Timestamp/Timedelta (GH7729) | diff --git a/pandas/core/api.py b/pandas/core/api.py
index b7e02917cd476..a8b10342593ce 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -25,6 +25,7 @@
from pandas.tseries.tools import to_datetime
from pandas.tseries.index import (DatetimeIndex, Timestamp,
date_range, bdate_range)
+from pandas.tseries.tdi import TimedeltaIndex, Timedelta
from pandas.tseries.period import Period, PeriodIndex
# legacy
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 244dcbcde32dc..8f880cebe9391 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1827,9 +1827,8 @@ def _possibly_convert_objects(values, convert_dates=True,
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
- from pandas.tseries.timedeltas import \
- _possibly_cast_to_timedelta
- values = _possibly_cast_to_timedelta(values, coerce=True)
+ from pandas.tseries.timedeltas import to_timedelta
+ values = to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
@@ -1889,7 +1888,7 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
- from pandas.tseries.timedeltas import _possibly_cast_to_timedelta
+ from pandas.tseries.timedeltas import to_timedelta
from pandas.tseries.tools import to_datetime
if dtype is not None:
@@ -1931,8 +1930,7 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
if is_datetime64:
value = to_datetime(value, coerce=coerce).values
elif is_timedelta64:
- value = _possibly_cast_to_timedelta(value,
- dtype=dtype)
+ value = to_timedelta(value, coerce=coerce).values
except (AttributeError, ValueError):
pass
@@ -1949,7 +1947,7 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
- value = _possibly_cast_to_timedelta(value)
+ value = to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
@@ -2005,16 +2003,7 @@ def _try_timedelta(v):
try:
return to_timedelta(v).values.reshape(shape)
except:
-
- # this is for compat with numpy < 1.7
- # but string-likes will fail here
-
- from pandas.tseries.timedeltas import \
- _possibly_cast_to_timedelta
- try:
- return _possibly_cast_to_timedelta(v, coerce='compat').reshape(shape)
- except:
- return v
+ return v
# do a quick inference for perf
sample = v[:min(3,len(v))]
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index cad49aa68a250..7e0dae91f465d 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -314,7 +314,7 @@ def _validate(self):
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
- from pandas.tseries.timedeltas import _possibly_cast_to_timedelta
+ from pandas.tseries.timedeltas import to_timedelta
coerce = True
if not is_list_like(values):
@@ -337,7 +337,7 @@ def _convert_to_array(self, values, name=None, other=None):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
# have a timedelta, convert to to ns here
- values = _possibly_cast_to_timedelta(values, coerce=coerce, dtype='timedelta64[ns]')
+ values = to_timedelta(values, coerce=coerce)
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
@@ -356,7 +356,7 @@ def _convert_to_array(self, values, name=None, other=None):
"datetime/timedelta operations [{0}]".format(
', '.join([com.pprint_thing(v)
for v in values[mask]])))
- values = _possibly_cast_to_timedelta(os, coerce=coerce)
+ values = to_timedelta(os, coerce=coerce)
elif inferred_type == 'floating':
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 4137b58885802..078bf0def241e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -20,7 +20,8 @@
_possibly_cast_to_datetime, _possibly_castable,
_possibly_convert_platform, _try_sort,
ABCSparseArray, _maybe_match_name, _coerce_to_dtype,
- _ensure_object, SettingWithCopyError)
+ _ensure_object, SettingWithCopyError,
+ _maybe_box_datetimelike)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index)
from pandas.core.indexing import _check_bool_indexer, _maybe_convert_indices
@@ -781,7 +782,7 @@ def get_value(self, label, takeable=False):
value : scalar value
"""
if takeable is True:
- return self.values[label]
+ return _maybe_box_datetimelike(self.values[label])
return self.index.get_value(self.values, label)
def set_value(self, label, value, takeable=False):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 17bffcae056cf..7f2907761990a 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -14,7 +14,7 @@
import pandas.core.common as com
from pandas import option_context
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
- MultiIndex, Float64Index, Timestamp)
+ MultiIndex, Float64Index, Timestamp, Timedelta)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_attr_equal)
@@ -322,7 +322,7 @@ def _check(f, func, values = False):
_check(d['ts'], 'at')
_check(d['floats'],'at')
- def test_at_timestamp(self):
+ def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
@@ -333,6 +333,22 @@ def test_at_timestamp(self):
xp = s.values[5]
self.assertEqual(result, xp)
+ # GH 7729
+ # make sure we are boxing the returns
+ s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
+ expected = Timestamp('2014-02-02')
+
+ for r in [ lambda : s.iat[1], lambda : s.iloc[1] ]:
+ result = r()
+ self.assertEqual(result, expected)
+
+ s = Series(['1 days','2 days'], dtype='timedelta64[ns]')
+ expected = Timedelta('2 days')
+
+ for r in [ lambda : s.iat[1], lambda : s.iloc[1] ]:
+ result = r()
+ self.assertEqual(result, expected)
+
def test_iat_invalid_args(self):
pass
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 2e8442be7bbcc..0b863f9662e14 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3286,15 +3286,12 @@ def test_bfill(self):
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_sub_of_datetime_from_TimeSeries(self):
- from pandas.tseries.timedeltas import _possibly_cast_to_timedelta
+ from pandas.tseries.timedeltas import to_timedelta
from datetime import datetime
a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))
b = datetime(1993, 6, 22, 13, 30)
a = Series([a])
- result = _possibly_cast_to_timedelta(np.abs(a - b))
- self.assertEqual(result.dtype, 'timedelta64[ns]')
-
- result = _possibly_cast_to_timedelta(np.abs(b - a))
+ result = to_timedelta(np.abs(a - b))
self.assertEqual(result.dtype, 'timedelta64[ns]')
def test_datetime64_with_index(self):
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index bf64b1bd703f9..367ea276646ee 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -213,13 +213,9 @@ def test_sub_isub(self):
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
- result_add = rng - other
- result_union = rng.diff(other)
+ result_union = rng.difference(other)
- tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
- rng -= other
- tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
@@ -859,13 +855,8 @@ def test_sub_isub(self):
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
- result_add = rng - other
- result_union = rng.diff(other)
-
- tm.assert_index_equal(result_add, expected)
+ result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
- rng -= other
- tm.assert_index_equal(rng, expected)
# offset
# DateOffset
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index ad8c2c0f09ea1..dc60f5024c9ed 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -12,7 +12,7 @@
is_timedelta64_dtype, _values_from_object,
is_list_like, isnull, _ensure_object)
-def to_timedelta(arg, unit='ns', box=True):
+def to_timedelta(arg, unit='ns', box=True, coerce=False):
"""
Convert argument to timedelta
@@ -23,6 +23,7 @@ def to_timedelta(arg, unit='ns', box=True):
box : boolean, default True
If True returns a Timedelta/TimedeltaIndex of the results
if False returns a np.timedelta64 or ndarray of values of dtype timedelta64[ns]
+ coerce : force errors to NaT (False by default)
Returns
-------
@@ -43,14 +44,14 @@ def _convert_listlike(arg, box, unit):
value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]')
else:
try:
- value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit)
+ value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit, coerce=coerce)
except:
# try to process strings fast; may need to fallback
try:
value = np.array([ _get_string_converter(r, unit=unit)() for r in arg ],dtype='m8[ns]')
except:
- value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ])
+ value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit, coerce=coerce) for r in arg ])
if box:
from pandas import TimedeltaIndex
@@ -67,7 +68,7 @@ def _convert_listlike(arg, box, unit):
return _convert_listlike(arg, box=box, unit=unit)
# ...so it must be a scalar value. Return scalar.
- return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box)
+ return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, coerce=coerce)
_unit_map = {
'Y' : 'Y',
@@ -135,7 +136,7 @@ def _validate_timedelta_unit(arg):
_full_search2 = re.compile(''.join(
["^\s*(?P<neg>-?)\s*"] + [ "(?P<" + p + ">\\d+\.?\d*\s*(" + ss + "))?\\s*" for p, ss in abbrevs ] + ['$']))
-def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True):
+def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, coerce=False):
""" convert strings to timedelta; coerce to Timedelta (if box), else np.timedelta64"""
if isinstance(r, compat.string_types):
@@ -145,7 +146,7 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True):
r = converter()
unit='ns'
- result = tslib.convert_to_timedelta(r,unit)
+ result = tslib.convert_to_timedelta(r,unit,coerce)
if box:
result = tslib.Timedelta(result)
@@ -262,32 +263,3 @@ def convert(r=None, unit=None, m=m):
# no converter
raise ValueError("cannot create timedelta string converter for [{0}]".format(r))
-def _possibly_cast_to_timedelta(value, coerce=True, dtype=None):
- """ try to cast to timedelta64, if already a timedeltalike, then make
- sure that we are [ns] (as numpy 1.6.2 is very buggy in this regards,
- don't force the conversion unless coerce is True
-
- if dtype is passed then this is the target dtype
- """
-
- # deal with numpy not being able to handle certain timedelta operations
- if isinstance(value, (ABCSeries, np.ndarray)):
-
- # i8 conversions
- if value.dtype == 'int64' and np.dtype(dtype) == 'timedelta64[ns]':
- value = value.astype('timedelta64[ns]')
- return value
- elif value.dtype.kind == 'm':
- if value.dtype != 'timedelta64[ns]':
- value = value.astype('timedelta64[ns]')
- return value
-
- # we don't have a timedelta, but we want to try to convert to one (but
- # don't force it)
- if coerce:
- new_value = tslib.array_to_timedelta64(
- _values_from_object(value).astype(object), coerce=False)
- if new_value.dtype == 'i8':
- value = np.array(new_value, dtype='timedelta64[ns]')
-
- return value
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index c74c35fd07f5e..e88bb906dc966 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -77,7 +77,7 @@ def wrapper(*args, **kwargs):
else:
new_arg_value = old_arg_value
msg = "the '%s' keyword is deprecated, " \
- "use '%s' instead" % (old_arg_name, new_arg_name)
+ "use '%s' instead" % (old_arg_name, new_arg_name)
warnings.warn(msg, FutureWarning)
if kwargs.get(new_arg_name, None) is not None:
msg = "Can only specify '%s' or '%s', not both" % \
| CLN/COMPAT: cleanup timedelta64[ns] series inferrence
closes #7729
| https://api.github.com/repos/pandas-dev/pandas/pulls/8298 | 2014-09-17T19:14:57Z | 2014-09-17T20:46:01Z | 2014-09-17T20:46:01Z | 2014-09-17T20:46:01Z |
API: accept -1 for layout in plot functions | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 7d5327a68ee2e..74cffa7859a1d 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -624,7 +624,9 @@ Enhancements
- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`)
-- Added ``layout`` keyword to ``DataFrame.plot`` (:issue:`6667`)
+- Added ``layout`` keyword to ``DataFrame.plot``. You can pass a tuple of
+ ``(rows, columns)``, one of which can be ``-1`` to automatically
+ infer (:issue:`6667`, :issue:`8071`).
- Allow to pass multiple axes to ``DataFrame.plot``, ``hist`` and ``boxplot`` (:issue:`5353`, :issue:`6970`, :issue:`7069`)
- Added support for ``c``, ``colormap`` and ``colorbar`` arguments for
``DataFrame.plot`` with ``kind='scatter'`` (:issue:`7780`)
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index b64388df62bd7..32892a431cd29 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1106,13 +1106,23 @@ The layout of subplots can be specified by ``layout`` keyword. It can accept
The number of axes which can be contained by rows x columns specified by ``layout`` must be
larger than the number of required subplots. If layout can contain more axes than required,
-blank axes are not drawn.
+blank axes are not drawn. Similar to a numpy array's ``reshape`` method, you
+can use ``-1`` for one dimension to automatically calculate the number of rows
+or columns needed, given the other.
.. ipython:: python
@savefig frame_plot_subplots_layout.png
df.plot(subplots=True, layout=(2, 3), figsize=(6, 6));
+The above example is identical to using
+
+.. ipython:: python
+ df.plot(subplots=True, layout=(-1, 3), figsize=(6, 6));
+
+The required number of rows (2) is inferred from the number of series to plot
+and the given number of columns (3).
+
Also, you can pass multiple axes created beforehand as list-like via ``ax`` keyword.
This allows to use more complicated layout.
The passed axes must be the same number as the subplots being drawn.
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 7ee532d7b1d3a..1e8cf4d700f39 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -472,6 +472,11 @@ def test_plot(self):
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
+ ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
+ self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
+ ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
+ self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
+
@slow
def test_plot_figsize_and_title(self):
# figsize and title
@@ -677,9 +682,21 @@ def test_hist_layout_with_by(self):
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
+ axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))
+ self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
+
axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+ axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))
+ self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
+
+ axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))
+ self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
+
+ axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))
+ self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
+
axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
@@ -927,7 +944,11 @@ def test_plot(self):
_check_plot_works(df.plot, grid=False)
axes = _check_plot_works(df.plot, subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
- _check_plot_works(df.plot, subplots=True, use_index=False)
+
+ axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))
+ self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
+
+ axes = _check_plot_works(df.plot, subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
@@ -985,6 +1006,9 @@ def test_plot(self):
axes = _check_plot_works(df.plot, kind='bar', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+ axes = _check_plot_works(df.plot, kind='bar', subplots=True,
+ layout=(-1, 1))
+ self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
@@ -1174,12 +1198,30 @@ def test_subplots_layout(self):
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
+ axes = df.plot(subplots=True, layout=(-1, 2))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+ self.assertEqual(axes.shape, (2, 2))
+
+ axes = df.plot(subplots=True, layout=(2, -1))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+ self.assertEqual(axes.shape, (2, 2))
+
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
+ axes = df.plot(subplots=True, layout=(-1, 4))
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
+ self.assertEqual(axes.shape, (1, 4))
+
+ axes = df.plot(subplots=True, layout=(4, -1))
+ self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
+ self.assertEqual(axes.shape, (4, 1))
+
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(1, 1))
+ with tm.assertRaises(ValueError):
+ axes = df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
@@ -1228,6 +1270,14 @@ def test_subplots_multiple_axes(self):
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
+ returned = df.plot(subplots=True, ax=axes, layout=(2, -1))
+ self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+ self.assertEqual(returned.shape, (4, ))
+
+ returned = df.plot(subplots=True, ax=axes, layout=(-1, 2))
+ self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+ self.assertEqual(returned.shape, (4, ))
+
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
@@ -2135,6 +2185,10 @@ def test_hist_layout(self):
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
+ {'layout': (-1, 4), 'expected_size': (1, 4)},
+ {'layout': (4, -1), 'expected_size': (4, 1)},
+ {'layout': (-1, 2), 'expected_size': (2, 2)},
+ {'layout': (2, -1), 'expected_size': (2, 2)}
)
for layout_test in layout_to_expected_size:
@@ -2149,6 +2203,9 @@ def test_hist_layout(self):
# invalid format for layout
with tm.assertRaises(ValueError):
df.hist(layout=(1,))
+ with tm.assertRaises(ValueError):
+ df.hist(layout=(-1, -1))
+
@slow
def test_scatter(self):
@@ -3048,6 +3105,8 @@ def test_grouped_box_layout(self):
by=df.gender, layout=(1, 1))
self.assertRaises(ValueError, df.boxplot, column=['height', 'weight', 'category'],
layout=(2, 1), return_type='dict')
+ self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],
+ by=df.gender, layout=(-1, -1))
box = _check_plot_works(df.groupby('gender').boxplot, column='height',
return_type='dict')
@@ -3080,15 +3139,29 @@ def test_grouped_box_layout(self):
box = _check_plot_works(df.groupby('category').boxplot, column='height',
layout=(3, 2), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
+ box = _check_plot_works(df.groupby('category').boxplot, column='height',
+ layout=(3, -1), return_type='dict')
+ self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
- box = df.boxplot(column=['height', 'weight', 'category'], by='gender', layout=(4, 1))
+ box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
+ layout=(4, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
+ box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
+ layout=(-1, 1))
+ self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
+
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], layout=(1, 4),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
+ box = df.groupby('classroom').boxplot(
+ column=['height', 'weight', 'category'], layout=(1, -1),
+ return_type='dict')
+ self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
+
+
@slow
def test_grouped_box_multiple_axes(self):
# GH 6970, GH 7069
@@ -3132,13 +3205,23 @@ def test_grouped_hist_layout(self):
layout=(1, 1))
self.assertRaises(ValueError, df.hist, column='height', by=df.category,
layout=(1, 3))
+ self.assertRaises(ValueError, df.hist, column='height', by=df.category,
+ layout=(-1, -1))
+
+ axes = _check_plot_works(df.hist, column='height', by=df.gender,
+ layout=(2, 1))
+ self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
- axes = _check_plot_works(df.hist, column='height', by=df.gender, layout=(2, 1))
+ axes = _check_plot_works(df.hist, column='height', by=df.gender,
+ layout=(2, -1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = df.hist(column='height', by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+ axes = df.hist(column='height', by=df.category, layout=(-1, 1))
+ self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
+
axes = df.hist(column='height', by=df.category, layout=(4, 2), figsize=(12, 8))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
tm.close()
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 9556e6b81c356..f8d7a16e686b7 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -3,6 +3,7 @@
import datetime
import warnings
import re
+from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
@@ -3059,6 +3060,17 @@ def _get_layout(nplots, layout=None, layout_type='box'):
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
+
+ # Python 2 compat
+ ceil_ = lambda x: int(ceil(x))
+ if nrows == -1 and ncols >0:
+ layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
+ elif ncols == -1 and nrows > 0:
+ layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
+ elif ncols <= 0 and nrows <= 0:
+ msg = "At least one dimension of layout must be positive"
+ raise ValueError(msg)
+
if nrows * ncols < nplots:
raise ValueError('Layout of %sx%s must be larger than required size %s' %
(nrows, ncols, nplots))
| Closes https://github.com/pydata/pandas/issues/8071#issuecomment-55906171
``` python
In [1]: df = pd.DataFrame(np.random.randn(20, 3))
In [2]: df.plot(subplots=True, layout=(-1, 2))
Out[2]:
array([[<matplotlib.axes._subplots.AxesSubplot object at 0x116b134e0>,
<matplotlib.axes._subplots.AxesSubplot object at 0x117819240>],
[<matplotlib.axes._subplots.AxesSubplot object at 0x11786b6a0>,
<matplotlib.axes._subplots.AxesSubplot object at 0x1178a5ba8>]], dtype=object)
```

| https://api.github.com/repos/pandas-dev/pandas/pulls/8297 | 2014-09-17T18:59:06Z | 2014-09-18T17:16:32Z | 2014-09-18T17:16:32Z | 2016-11-03T12:38:19Z |
ERR: DataFrame.from_records raises misleading exception on shape mismatch | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 9db0293044d2d..2877c2b10c92f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3507,7 +3507,7 @@ def create_block_manager_from_arrays(arrays, names, axes):
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
- construction_error(len(arrays), arrays[0].shape[1:], axes, e)
+ construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index b68fd62f4b1d7..dd6cf773dcdb7 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4032,7 +4032,7 @@ def test_from_records_to_records(self):
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
- msg = r'Shape of passed values is \(3,\), indices imply \(3, 1\)'
+ msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame.from_records(arr, index=index[:-1])
| Currently, `DataFrame.from_records` raises a misleading exception when the shape of the input data do not match the shape implied by the axes:
``` python
In [1]: import pandas as pd
In [2]: pd.DataFrame.from_records([(0, 1), (0, 1), (0, 1)], index=[0, 1],
...: columns=['A', 'B'])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-b8c920ac0f12> in <module>()
1 pd.DataFrame.from_records([(0, 1), (0, 1), (0, 1)], index=[0, 1],
----> 2 columns=['A', 'B'])
/Users/afni/homebrew/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/core/frame.pyc in from_records(cls, data, index, exclude, columns, coerce_float, nrows)
833
834 mgr = _arrays_to_mgr(arrays, arr_columns, result_index,
--> 835 columns)
836
837 return cls(mgr)
/Users/afni/homebrew/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/core/frame.pyc in _arrays_to_mgr(arrays, arr_names, index, columns, dtype)
4628 axes = [_ensure_index(columns), _ensure_index(index)]
4629
-> 4630 return create_block_manager_from_arrays(arrays, arr_names, axes)
4631
4632
/Users/afni/homebrew/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/core/internals.pyc in create_block_manager_from_arrays(arrays, names, axes)
3238 return mgr
3239 except (ValueError) as e:
-> 3240 construction_error(len(arrays), arrays[0].shape[1:], axes, e)
3241
3242
/Users/afni/homebrew/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/core/internals.pyc in construction_error(tot_items, block_shape, axes, e)
3209 raise e
3210 raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
-> 3211 passed,implied))
3212
3213
ValueError: Shape of passed values is (2,), indices imply (2, 2)
```
This error message leads the user to believe that the error is internal (after all, the input data were _definitely_ 2-dimensional) and hides the true error, which is that the shape of the passed values is actually `(2, 3)`.
For some reason, this behavior was explicitly specified by a test.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8294 | 2014-09-17T16:22:35Z | 2014-09-17T20:40:28Z | 2014-09-17T20:40:28Z | 2014-09-17T20:41:33Z |
PERF: Improve performance CustmBusinessDay - 2nd | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 9d03b7b38bea7..5f8c2e7dcd30f 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -833,6 +833,7 @@ Performance
- Performance improvements in groupby ``.agg`` and ``.apply`` where builtins max/min were not mapped to numpy/cythonized versions (:issue:`7722`)
- Performance improvement in writing to sql (``to_sql``) of up to 50% (:issue:`8208`).
- Performance benchmarking of groupby for large value of ngroups (:issue:`6787`)
+- Performance improvement in ``CustomBusinessDay``, ``CustomBusinessMonth`` (:issue:`8236`)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 8bb5584fee7a7..55aad38c10fae 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -225,12 +225,12 @@ def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
- attrs = [(k, v) for k, v in compat.iteritems(vars(self))
- if (k not in ['kwds', 'name', 'normalize',
- 'busdaycalendar']) and (k[0] != '_')]
- attrs.extend(list(self.kwds.items()))
+ all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
+ if 'holidays' in all_paras and not all_paras['holidays']:
+ all_paras.pop('holidays')
+ exclude = ['kwds', 'name','normalize', 'calendar']
+ attrs = [(k, v) for k, v in all_paras.items() if (k not in exclude ) and (k[0] != '_')]
attrs = sorted(set(attrs))
-
params = tuple([str(self.__class__)] + attrs)
return params
@@ -547,38 +547,57 @@ class CustomBusinessDay(BusinessDay):
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
- calendar : HolidayCalendar instance
- instance of AbstractHolidayCalendar that provide the list of holidays
+ calendar : pd.HolidayCalendar or np.busdaycalendar
"""
-
_cacheable = False
_prefix = 'C'
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
+ holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')
-
- if 'calendar' in kwds:
- holidays = kwds['calendar'].holidays()
- else:
- holidays = kwds.get('holidays', [])
+ calendar, holidays = self.get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ # CustomBusinessDay instances are identified by the
+ # following two attributes. See DateOffset._params()
+ # holidays, weekmask
+
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+
+ def get_calendar(self, weekmask, holidays, calendar):
+ '''Generate busdaycalendar'''
+ if isinstance(calendar, np.busdaycalendar):
+ if not holidays:
+ holidays = tuple(calendar.holidays)
+ elif not isinstance(holidays, tuple):
+ holidays = tuple(holidays)
+ else:
+ # trust that calendar.holidays and holidays are
+ # consistent
+ pass
+ return calendar, holidays
+
+ if holidays is None:
+ holidays = []
+ try:
+ holidays = holidays + calendar.holidays().tolist()
+ except AttributeError:
+ pass
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
- self.holidays = tuple(sorted(holidays))
- self.kwds['holidays'] = self.holidays
+ holidays = tuple(sorted(holidays))
- self._set_busdaycalendar()
+ kwargs = {'weekmask': weekmask}
+ if holidays:
+ kwargs['holidays'] = holidays
- def _set_busdaycalendar(self):
- if self.holidays:
- kwargs = {'weekmask':self.weekmask,'holidays':self.holidays}
- else:
- kwargs = {'weekmask':self.weekmask}
try:
- self.busdaycalendar = np.busdaycalendar(**kwargs)
+ busdaycalendar = np.busdaycalendar(**kwargs)
except:
# Check we have the required numpy version
from distutils.version import LooseVersion
@@ -589,17 +608,23 @@ def _set_busdaycalendar(self):
np.__version__)
else:
raise
+ return busdaycalendar, holidays
def __getstate__(self):
"""Return a pickleable state"""
state = self.__dict__.copy()
- del state['busdaycalendar']
+ del state['calendar']
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
- self._set_busdaycalendar()
+ calendar, holidays = self.get_calendar(weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=None)
+ self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['weekmask'] = state['weekmask']
@apply_wraps
def apply(self, other):
@@ -613,7 +638,7 @@ def apply(self, other):
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
- busdaycal=self.busdaycalendar)
+ busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
@@ -635,7 +660,6 @@ def _to_dt64(dt, dtype='datetime64'):
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
-
if getattr(dt, 'tzinfo', None) is not None:
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
@@ -649,7 +673,7 @@ def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = self._to_dt64(dt,'datetime64[D]')
- return np.is_busday(day64, busdaycal=self.busdaycalendar)
+ return np.is_busday(day64, busdaycal=self.calendar)
class MonthOffset(SingleConstructorOffset):
@@ -767,7 +791,6 @@ def onOffset(self, dt):
_prefix = 'BMS'
-
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
@@ -788,18 +811,22 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
+ calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBM'
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
+ holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')
- self.cbday = CustomBusinessDay(n=self.n, **kwds)
- self.m_offset = MonthEnd()
+ self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
+ weekmask=weekmask, holidays=holidays,
+ calendar=calendar, **kwds)
+ self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
+ self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
@@ -817,11 +844,11 @@ def apply(self,other):
n -= 1
elif other > cur_cmend and n <= -1:
n += 1
-
- new = cur_mend + n * MonthEnd()
+
+ new = cur_mend + n * self.m_offset
result = self.cbday.rollback(new)
return result
-
+
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
@@ -842,18 +869,22 @@ class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
+ calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBMS'
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
+ holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')
- self.cbday = CustomBusinessDay(n=self.n, normalize=normalize, **kwds)
- self.m_offset = MonthBegin(normalize=normalize)
+ self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
+ weekmask=weekmask, holidays=holidays,
+ calendar=calendar, **kwds)
+ self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
+ self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
@@ -872,8 +903,8 @@ def apply(self,other):
n += 1
elif dt_in < cur_cmbegin and n >= 1:
n -= 1
-
- new = cur_mbegin + n * MonthBegin()
+
+ new = cur_mbegin + n * self.m_offset
result = self.cbday.rollforward(new)
return result
diff --git a/pandas/tseries/tests/data/cday-0.14.1.pickle b/pandas/tseries/tests/data/cday-0.14.1.pickle
new file mode 100644
index 0000000000000..48488099482e4
Binary files /dev/null and b/pandas/tseries/tests/data/cday-0.14.1.pickle differ
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index b3764b73b15ac..3b2e8f203c313 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -1,3 +1,4 @@
+import os
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from pandas.compat import range
@@ -22,6 +23,7 @@
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
+from pandas.io.pickle import read_pickle
from pandas.tslib import NaT, Timestamp
import pandas.tslib as tslib
from pandas.util.testing import assertRaisesRegexp
@@ -848,6 +850,24 @@ def test_calendar(self):
dt = datetime(2014, 1, 17)
assertEq(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
+ def test_roundtrip_pickle(self):
+ def _check_roundtrip(obj):
+ unpickled = self.round_trip_pickle(obj)
+ self.assertEqual(unpickled, obj)
+ _check_roundtrip(self.offset)
+ _check_roundtrip(self.offset2)
+ _check_roundtrip(self.offset*2)
+
+ def test_pickle_compat_0_14_1(self):
+ hdays = [datetime(2013,1,1) for ele in range(4)]
+
+ pth = tm.get_data_path()
+
+ cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle'))
+ cday = CDay(holidays=hdays)
+ self.assertEqual(cday, cday0_14_1)
+
+
class CustomBusinessMonthBase(object):
_multiprocess_can_split_ = True
@@ -894,6 +914,15 @@ def test_offsets_compare_equal(self):
offset2 = self._object()
self.assertFalse(offset1 != offset2)
+ def test_roundtrip_pickle(self):
+ def _check_roundtrip(obj):
+ unpickled = self.round_trip_pickle(obj)
+ self.assertEqual(unpickled, obj)
+ _check_roundtrip(self._object())
+ _check_roundtrip(self._object(2))
+ _check_roundtrip(self._object()*2)
+
+
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
@@ -1006,8 +1035,12 @@ def test_holidays(self):
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
- self.assertEqual(DatetimeIndex(start='20120101',end='20130101',freq=CBMonthEnd(calendar=USFederalHolidayCalendar())).tolist()[0],
- datetime(2012,1,31))
+ hcal = USFederalHolidayCalendar()
+ freq = CBMonthEnd(calendar=hcal)
+
+ self.assertEqual(DatetimeIndex(start='20120101',end='20130101',
+ freq=freq).tolist()[0],
+ datetime(2012,1,31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_object = CBMonthBegin
@@ -1120,8 +1153,11 @@ def test_holidays(self):
self.assertEqual(dt + 2*bm_offset,datetime(2012,2,3))
def test_datetimeindex(self):
- self.assertEqual(DatetimeIndex(start='20120101',end='20130101',freq=CBMonthBegin(calendar=USFederalHolidayCalendar())).tolist()[0],
- datetime(2012,1,3))
+ hcal = USFederalHolidayCalendar()
+ cbmb = CBMonthBegin(calendar=hcal)
+ self.assertEqual(DatetimeIndex(start='20120101', end='20130101',
+ freq=cbmb).tolist()[0],
+ datetime(2012,1,3))
def assertOnOffset(offset, date, expected):
diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py
index bb55b88cf1f34..c67cdabdc1a06 100644
--- a/vb_suite/timeseries.py
+++ b/vb_suite/timeseries.py
@@ -285,15 +285,20 @@ def date_range(start=None, end=None, periods=None, freq=None):
setup = common_setup + """
import datetime as dt
import pandas as pd
+import pandas.tseries.holiday
import numpy as np
date = dt.datetime(2011,1,1)
dt64 = np.datetime64('2011-01-01 09:00Z')
+hcal = pd.tseries.holiday.USFederalHolidayCalendar()
day = pd.offsets.Day()
year = pd.offsets.YearBegin()
cday = pd.offsets.CustomBusinessDay()
-cme = pd.offsets.CustomBusinessMonthEnd()
+cmb = pd.offsets.CustomBusinessMonthBegin(calendar=hcal)
+cme = pd.offsets.CustomBusinessMonthEnd(calendar=hcal)
+
+cdayh = pd.offsets.CustomBusinessDay(calendar=hcal)
"""
timeseries_day_incr = Benchmark("date + day",setup)
@@ -306,15 +311,26 @@ def date_range(start=None, end=None, periods=None, freq=None):
timeseries_custom_bday_incr = \
Benchmark("date + cday",setup)
+timeseries_custom_bday_decr = \
+ Benchmark("date - cday",setup)
+
timeseries_custom_bday_apply = \
Benchmark("cday.apply(date)",setup)
timeseries_custom_bday_apply_dt64 = \
Benchmark("cday.apply(dt64)",setup)
-# Increment by n
-timeseries_custom_bday_incr_n = \
- Benchmark("date + 10 * cday",setup)
+timeseries_custom_bday_cal_incr = \
+ Benchmark("date + 1 * cdayh",setup)
+
+timeseries_custom_bday_cal_decr = \
+ Benchmark("date - 1 * cdayh",setup)
+
+timeseries_custom_bday_cal_incr_n = \
+ Benchmark("date + 10 * cdayh",setup)
+
+timeseries_custom_bday_cal_incr_neg_n = \
+ Benchmark("date - 10 * cdayh",setup)
# Increment custom business month
timeseries_custom_bmonthend_incr = \
@@ -323,6 +339,16 @@ def date_range(start=None, end=None, periods=None, freq=None):
timeseries_custom_bmonthend_incr_n = \
Benchmark("date + 10 * cme",setup)
+timeseries_custom_bmonthend_decr_n = \
+ Benchmark("date - 10 * cme",setup)
+
+timeseries_custom_bmonthbegin_incr_n = \
+ Benchmark("date + 10 * cmb",setup)
+
+timeseries_custom_bmonthbegin_decr_n = \
+ Benchmark("date - 10 * cmb",setup)
+
+
#----------------------------------------------------------------------
# month/quarter/year start/end accessors
@@ -357,4 +383,3 @@ def iter_n(iterable, n=None):
timeseries_iter_datetimeindex_preexit = Benchmark('iter_n(idx1, M)', setup)
timeseries_iter_periodindex_preexit = Benchmark('iter_n(idx2, M)', setup)
-
| New attempt to close #8236
CustomBusinessDay is now fast with holiday calendar and incrementing in different ways. `busdaycalendar` is cached in self.kwds.
```
In [15]:
date = pd.Timestamp('20120101')
cbday = pd.offsets.CustomBusinessDay()
date + cbday
%timeit date + cbday
100000 loops, best of 3: 14.9 µs per loop
In [16]:
hdays = [dt.datetime(2013,1,1) for ele in range(1000)]
cbdayh = pd.offsets.CustomBusinessDay(holidays=hdays)
cbme = pd.offsets.CustomBusinessMonthEnd(holidays=hdays)
cbmb = pd.offsets.CustomBusinessMonthBegin(holidays=hdays)
%timeit date + cbdayh
%timeit date + 2 * cbdayh
%timeit date - 1 * cbdayh
%timeit date - 10 * cbday
%timeit date + cbmb
%timeit date + 10 * cbmb
%timeit date - 10 * cbmb
%timeit date + cbme
%timeit date + 10 * cbme
%timeit date - 10 * cbme
```
100000 loops, best of 3: 14.8 µs per loop
10000 loops, best of 3: 20.1 µs per loop
10000 loops, best of 3: 24 µs per loop
10000 loops, best of 3: 23.7 µs per loop
10000 loops, best of 3: 108 µs per loop
10000 loops, best of 3: 104 µs per loop
10000 loops, best of 3: 117 µs per loop
10000 loops, best of 3: 118 µs per loop
10000 loops, best of 3: 188 µs per loop
1000 loops, best of 3: 147 µs per loop
| https://api.github.com/repos/pandas-dev/pandas/pulls/8293 | 2014-09-17T13:16:48Z | 2014-10-04T13:37:55Z | 2014-10-04T13:37:55Z | 2014-10-04T13:38:16Z |
BUG: fix setting dataframe column to a sparse array | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 8b467d768df8b..9574ad9b66f03 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -910,3 +910,4 @@ Bug Fixes
- Bug where ``col_space`` was ignored in ``DataFrame.to_string()`` when ``header=False`` (:issue:`8230`).
- Bug with ``DatetimeIndex.asof`` incorrectly matching partial strings and returning the wrong date (:issue:`8245`).
- Bug in plotting methods modifying the global matplotlib rcParams (:issue:`8242`).
+- Bug in ``DataFrame.__setitem__`` that caused errors when setting a dataframe column to a sparse array (:issue:`8131`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 939a94c033ea0..4654ceee9896b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -43,6 +43,7 @@
from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
+from pandas.sparse.array import SparseArray
from pandas.util.decorators import deprecate, Appender, Substitution, \
deprecate_kwarg
@@ -2164,8 +2165,8 @@ def reindexer(value):
value = np.repeat(value, len(self.index)).astype(dtype)
value = com._possibly_cast_to_datetime(value, dtype)
- # return categoricals directly
- if isinstance(value, Categorical):
+ # return unconsolidatables directly
+ if isinstance(value, (Categorical, SparseArray)):
return value
# broadcast across multiple columns if necessary
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8245d1bd0759c..b68fd62f4b1d7 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1791,6 +1791,20 @@ def test_getitem_ix_float_duplicates(self):
expect = df.iloc[[1, -1], 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
+ def test_setitem_with_sparse_value(self):
+ # GH8131
+ df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
+ sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
+ df['new_column'] = sp_series
+ tm.assert_series_equal(df['new_column'], sp_series)
+
+ def test_setitem_with_unaligned_sparse_value(self):
+ df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
+ sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
+ .to_sparse(fill_value=0))
+ df['new_column'] = sp_series
+ tm.assert_series_equal(df['new_column'], pd.Series([1, 0, 0]))
+
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
| This should fix #8131.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8291 | 2014-09-17T12:05:30Z | 2014-09-17T14:48:25Z | 2014-09-17T14:48:25Z | 2014-09-17T14:48:32Z |
DOC: Cookbook Inline Examples 2014Q3 | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index a293e0a57fc0f..e8c6931cbad34 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -5,28 +5,42 @@
.. ipython:: python
:suppress:
+ import pandas as pd
import numpy as np
+
import random
import os
+ import itertools
+ import functools
+ import datetime
+
np.random.seed(123456)
- from pandas import *
- options.display.max_rows=15
- options.display.mpl_style='default'
- import pandas as pd
- randn = np.random.randn
- randint = np.random.randint
- np.set_printoptions(precision=4, suppress=True)
+
+ pd.options.display.max_rows=15
+ pd.options.display.mpl_style='default'
+ np.set_printoptions(precision=4, suppress=True)
+
+
********
Cookbook
********
This is a repository for *short and sweet* examples and links for useful pandas recipes.
-We encourage users to add to this documentation.
+We encourage users to add to this documentation.
+
+Adding interesting links and/or inline examples to this section is a great *First Pull Request*.
-This is a great *First Pull Request* (to add interesting links and/or put short code inline
-for existing links)
+Simplified, condensed, new-user friendly, in-line examples have been inserted where possible to
+augment the Stack-Overflow and GitHub links. Many of the links contain expanded information,
+above what the in-line examples offer.
+Pandas (pd) and Numpy (np) are the only two abbreviated imported modules. The rest are kept
+explicitly imported for newer users.
+
+These examples are written for python 3.4. Minor tweaks might be necessary for earlier python
+versions.
+
Idioms
------
@@ -34,53 +48,267 @@ Idioms
These are some neat pandas ``idioms``
-`How to do if-then-else?
+`if-then/if-then-else on one column, and assignment to another one or more columns:
<http://stackoverflow.com/questions/17128302/python-pandas-idiom-for-if-then-else>`__
-`How to do if-then-else #2
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+if-then...
+**********
+
+An if-then on one column
+
+.. ipython:: python
+
+ df.ix[df.AAA >= 5,'BBB'] = -1; df
+
+An if-then with assignment to 2 columns:
+
+.. ipython:: python
+
+ df.ix[df.AAA >= 5,['BBB','CCC']] = 555; df
+
+Add another line with different logic, to do the -else
+
+.. ipython:: python
+
+ df.ix[df.AAA < 5,['BBB','CCC']] = 2000; df
+
+Or use pandas where after you've set up a mask
+
+.. ipython:: python
+
+ df_mask = pd.DataFrame({'AAA' : [True] * 4, 'BBB' : [False] * 4,'CCC' : [True,False] * 2})
+ df.where(df_mask,-1000)
+
+`if-then-else using numpy's where()
<http://stackoverflow.com/questions/19913659/pandas-conditional-creation-of-a-series-dataframe-column>`__
-`How to split a frame with a boolean criterion?
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+ df['logic'] = np.where(df['AAA'] > 5,'high','low'); df
+
+Splitting
+*********
+
+`Split a frame with a boolean criterion
<http://stackoverflow.com/questions/14957116/how-to-split-a-dataframe-according-to-a-boolean-criterion>`__
-`How to select from a frame with complex criteria?
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+ dflow = df[df.AAA <= 5]
+ dfhigh = df[df.AAA > 5]
+
+ dflow; dfhigh
+
+Building Criteria
+*****************
+
+`Select with multi-column criteria
<http://stackoverflow.com/questions/15315452/selecting-with-complex-criteria-from-pandas-dataframe>`__
-`Select rows closest to a user-defined number
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+...and (without assignment returns a Series)
+
+.. ipython:: python
+
+ newseries = df.loc[(df['BBB'] < 25) & (df['CCC'] >= -40), 'AAA']; newseries
+
+...or (without assignment returns a Series)
+
+.. ipython:: python
+
+ newseries = df.loc[(df['BBB'] > 25) | (df['CCC'] >= -40), 'AAA']; newseries;
+
+...or (with assignment modifies the DataFrame.)
+
+.. ipython:: python
+
+ df.loc[(df['BBB'] > 25) | (df['CCC'] >= 75), 'AAA'] = 0.1; df
+
+`Select rows with data closest to certain value using argsort
<http://stackoverflow.com/questions/17758023/return-rows-in-a-dataframe-closest-to-a-user-defined-number>`__
-`How to reduce a sequence (e.g. of Series) using a binary operator
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+ aValue = 43.0
+ df.ix[(df.CCC-aValue).abs().argsort()]
+
+`Dynamically reduce a list of criteria using a binary operators
<http://stackoverflow.com/questions/21058254/pandas-boolean-operation-in-a-python-list/21058331>`__
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+ Crit1 = df.AAA <= 5.5
+ Crit2 = df.BBB == 10.0
+ Crit3 = df.CCC > -40.0
+
+One could hard code:
+
+.. ipython:: python
+
+ AllCrit = Crit1 & Crit2 & Crit3
+ AllCrit;
+
+...Or it can be done with a list of dynamically built criteria
+
+.. ipython:: python
+
+ CritList = [Crit1,Crit2,Crit3]
+ AllCrit = functools.reduce(lambda x,y: x & y, CritList)
+
+ df[AllCrit]
.. _cookbook.selection:
Selection
---------
+DataFrames
+**********
+
The :ref:`indexing <indexing>` docs.
-`Indexing using both row labels and conditionals
+`Using both row labels and value conditionals
<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}); df
+
+ df[(df.AAA <= 6) & (df.index.isin([0,2,4]))]
+
`Use loc for label-oriented slicing and iloc positional slicing
<https://github.com/pydata/pandas/issues/2904>`__
+.. ipython:: python
+
+ data = {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]}
+ df = pd.DataFrame(data=data,index=['foo','bar','boo','kar']); df
+
+There are 2 explicit slicing methods, with a third general case
+
+1. Positional-oriented (Python slicing style : exclusive of end)
+2. Label-oriented (Non-Python slicing style : inclusive of end)
+3. General (Either slicing style : depends on if the slice contains labels or positions)
+
+.. ipython:: python
+ df.iloc[0:3] #Positional
+
+ df.loc['bar':'kar'] #Label
+
+ #Generic
+ df.ix[0:3] #Same as .iloc[0:3]
+ df.ix['bar':'kar'] #Same as .loc['bar':'kar']
+
+Ambiguity arises when an index consists of integers with a non-zero start or non-unit increment.
+
+.. ipython:: python
+
+ df2 = pd.DataFrame(data=data,index=[1,2,3,4]); #Note index starts at 1.
+
+ df2.iloc[1:3] #Position-oriented
+
+ df2.loc[1:3] #Label-oriented
+
+ df2.ix[1:3] #General, will mimic loc (label-oriented)
+ df2.ix[0:3] #General, will mimic iloc (position-oriented), as loc[0:3] would raise a KeyError
+
+`Using inverse operator (~) to take the complement of a mask
+<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__
+
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40], 'CCC' : [100,50,-30,-50]}); df
+
+ df[~((df.AAA <= 6) & (df.index.isin([0,2,4])))]
+
+Panels
+******
+
`Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions
<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__
+.. ipython:: python
+
+ rng = pd.date_range('1/1/2013',periods=100,freq='D')
+ data = np.random.randn(100, 4)
+ cols = ['A','B','C','D']
+ df1, df2, df3 = pd.DataFrame(data, rng, cols), pd.DataFrame(data, rng, cols), pd.DataFrame(data, rng, cols)
+
+ pf = pd.Panel({'df1':df1,'df2':df2,'df3':df3});pf
+
+ #Assignment using Transpose (pandas < 0.15)
+ pf = pf.transpose(2,0,1)
+ pf['E'] = pd.DataFrame(data, rng, cols)
+ pf = pf.transpose(1,2,0);pf
+
+ #Direct assignment (pandas > 0.15)
+ pf.loc[:,:,'F'] = pd.DataFrame(data, rng, cols);pf
+
`Mask a panel by using np.where and then reconstructing the panel with the new masked values
<http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__
-`Using ~ to take the complement of a boolean array, see
-<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__
+New Columns
+***********
-`Efficiently creating columns using applymap
+`Efficiently and dynamically creating new columns using applymap
<http://stackoverflow.com/questions/16575868/efficiently-creating-additional-columns-in-a-pandas-dataframe-using-map>`__
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [1,2,1,3], 'BBB' : [1,1,2,2], 'CCC' : [2,1,3,1]}); df
+
+ source_cols = df.columns # or some subset would work too.
+ new_cols = [str(x) + "_cat" for x in source_cols]
+ categories = {1 : 'Alpha', 2 : 'Beta', 3 : 'Charlie' }
+
+ df[new_cols] = df[source_cols].applymap(categories.get);df
+
`Keep other columns when using min() with groupby
<http://stackoverflow.com/questions/23394476/keep-other-columns-when-using-min-with-groupby>`__
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'AAA' : [1,1,1,2,2,2,3,3], 'BBB' : [2,1,3,4,5,1,2,3]}); df
+
+Method 1 : idxmin() to get the index of the mins
+
+.. ipython:: python
+
+ df.loc[df.groupby("AAA")["BBB"].idxmin()]
+
+Method 2 : sort then take first of each
+
+.. ipython:: python
+
+ df.sort("BBB").groupby("AAA", as_index=False).first()
+
+Notice the same results, with the exception of the index.
+
.. _cookbook.multi_index:
MultiIndexing
@@ -91,35 +319,100 @@ The :ref:`multindexing <advanced.hierarchical>` docs.
`Creating a multi-index from a labeled frame
<http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__
+.. ipython:: python
+
+ df = pd.DataFrame({'row' : [0,1,2],
+ 'One_X' : [1.1,1.1,1.1],
+ 'One_Y' : [1.2,1.2,1.2],
+ 'Two_X' : [1.11,1.11,1.11],
+ 'Two_Y' : [1.22,1.22,1.22]}); df
+
+ # As Labelled Index
+ df = df.set_index('row');df
+ # With Heirarchical Columns
+ df.columns = pd.MultiIndex.from_tuples([tuple(c.split('_')) for c in df.columns]);df
+ # Now stack & Reset
+ df = df.stack(0).reset_index(1);df
+ # And fix the labels (Notice the label 'level_1' got added automatically)
+ df.columns = ['Sample','All_X','All_Y'];df
+
Arithmetic
-~~~~~~~~~~
+**********
`Performing arithmetic with a multi-index that needs broadcasting
<http://stackoverflow.com/questions/19501510/divide-entire-pandas-multiindex-dataframe-by-dataframe-variable/19502176#19502176>`__
+.. ipython:: python
+
+ cols = pd.MultiIndex.from_tuples([ (x,y) for x in ['A','B','C'] for y in ['O','I']])
+ df = pd.DataFrame(np.random.randn(2,6),index=['n','m'],columns=cols); df
+ df = df.div(df['C'],level=1); df
+
Slicing
-~~~~~~~
+*******
`Slicing a multi-index with xs
<http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__
-`Slicing a multi-index with xs #2
+.. ipython:: python
+
+ coords = [('AA','one'),('AA','six'),('BB','one'),('BB','two'),('BB','six')]
+ index = pd.MultiIndex.from_tuples(coords)
+ df = pd.DataFrame([11,22,33,44,55],index,['MyData']); df
+
+To take the cross section of the 1st level and 1st axis the index:
+
+.. ipython:: python
+
+ df.xs('BB',level=0,axis=0) #Note : level and axis are optional, and default to zero
+
+...and now the 2nd level of the 1st axis.
+
+.. ipython:: python
+
+ df.xs('six',level=1,axis=0)
+
+`Slicing a multi-index with xs, method #2
<http://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__
+.. ipython:: python
+
+ index = list(itertools.product(['Ada','Quinn','Violet'],['Comp','Math','Sci']))
+ headr = list(itertools.product(['Exams','Labs'],['I','II']))
+
+ indx = pd.MultiIndex.from_tuples(index,names=['Student','Course'])
+ cols = pd.MultiIndex.from_tuples(headr) #Notice these are un-named
+
+ data = [[70+x+y+(x*y)%3 for x in range(4)] for y in range(9)]
+
+ df = pd.DataFrame(data,indx,cols); df
+
+ All = slice(None)
+
+ df.loc['Violet']
+ df.loc[(All,'Math'),All]
+ df.loc[(slice('Ada','Quinn'),'Math'),All]
+ df.loc[(All,'Math'),('Exams')]
+ df.loc[(All,'Math'),(All,'II')]
+
`Setting portions of a multi-index with xs
<http://stackoverflow.com/questions/19319432/pandas-selecting-a-lower-level-in-a-dataframe-to-do-a-ffill>`__
Sorting
-~~~~~~~
+*******
-`Multi-index sorting
+`Sort by specific column or an ordered list of columns, with a multi-index
<http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__
-`Partial Selection, the need for sortedness
+.. ipython:: python
+
+ df.sort(('Labs', 'II'), ascending=False)
+
+`Partial Selection, the need for sortedness;
<https://github.com/pydata/pandas/issues/2995>`__
Levels
-~~~~~~
+******
`Prepending a level to a multiindex
<http://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex>`__
@@ -128,7 +421,7 @@ Levels
<http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__
panelnd
-~~~~~~~
+*******
The :ref:`panelnd<dsintro.panelnd>` docs.
@@ -155,7 +448,7 @@ Fill forward a reversed timeseries
<http://stackoverflow.com/questions/18196811/cumsum-reset-at-nan>`__
Replace
-~~~~~~~
+*******
`Using replace with backrefs
<http://stackoverflow.com/questions/16818871/extracting-value-and-creating-new-column-out-of-it>`__
@@ -170,35 +463,130 @@ The :ref:`grouping <groupby>` docs.
`Basic grouping with apply
<http://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg>`__
+Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to all the columns
+
+.. ipython:: python
+
+ df = pd.DataFrame({'animal': 'cat dog cat fish dog cat cat'.split(),
+ 'size': list('SSMMMLL'),
+ 'weight': [8, 10, 11, 1, 20, 12, 12],
+ 'adult' : [False] * 5 + [True] * 2}); df
+
+ #List the size of the animals with the highest weight.
+ df.groupby('animal').apply(lambda subf: subf['size'][subf['weight'].idxmax()])
+
`Using get_group
<http://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key>`__
+.. ipython:: python
+
+ gb = df.groupby(['animal'])
+
+ gb.get_group('cat')
+
`Apply to different items in a group
<http://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas>`__
+.. ipython:: python
+
+ def GrowUp(x):
+ avg_weight = sum(x[x.size == 'S'].weight * 1.5)
+ avg_weight += sum(x[x.size == 'M'].weight * 1.25)
+ avg_weight += sum(x[x.size == 'L'].weight)
+ avg_weight = avg_weight / len(x)
+ return pd.Series(['L',avg_weight,True], index=['size', 'weight', 'adult'])
+
+ expected_df = gb.apply(GrowUp)
+
+ expected_df
+
`Expanding Apply
<http://stackoverflow.com/questions/14542145/reductions-down-a-column-in-pandas>`__
-`Replacing values with groupby means
+.. ipython:: python
+
+ S = pd.Series([i / 100 for i in range(1,11)])
+
+ def CumRet(x,y):
+ return x * (1 + y)
+
+ def Red(x):
+ return functools.reduce(CumRet,x,1.0)
+
+ pd.expanding_apply(S, Red)
+
+`Replacing some values with mean of the rest of a group
<http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means>`__
-`Sort by group with aggregation
+.. ipython:: python
+
+ df = pd.DataFrame({'A' : [1, 1, 2, 2], 'B' : [1, -1, 1, 2]})
+
+ gb = df.groupby('A')
+
+ def replace(g):
+ mask = g < 0
+ g.loc[mask] = g[~mask].mean()
+ return g
+
+ gb.transform(replace)
+
+`Sort groups by aggregated data
<http://stackoverflow.com/questions/14941366/pandas-sort-by-group-aggregate-and-column>`__
+.. ipython:: python
+
+ df = pd.DataFrame({'code': ['foo', 'bar', 'baz'] * 2,
+ 'data': [0.16, -0.21, 0.33, 0.45, -0.59, 0.62],
+ 'flag': [False, True] * 3})
+
+ code_groups = df.groupby('code')
+
+ agg_n_sort_order = code_groups[['data']].transform(sum).sort('data')
+
+ sorted_df = df.ix[agg_n_sort_order.index]
+
+ sorted_df
+
`Create multiple aggregated columns
<http://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function>`__
+.. ipython:: python
+
+ rng = pd.date_range(start="2014-10-07",periods=10,freq='2min')
+ ts = pd.Series(data = list(range(10)), index = rng)
+
+ def MyCust(x):
+ if len(x) > 2:
+ return x[1] * 1.234
+ else:
+ return pd.NaT
+
+ mhc = {'Mean' : np.mean, 'Max' : np.max, 'Custom' : MyCust}
+
+ ts.resample("5min",how = mhc)
+
+ ts
+
`Create a value counts column and reassign back to the DataFrame
<http://stackoverflow.com/questions/17709270/i-want-to-create-a-column-of-value-counts-in-my-pandas-dataframe>`__
+.. ipython:: python
+
+ df = pd.DataFrame({'Color': 'Red Red Red Blue'.split(),
+ 'Value': [100, 150, 50, 50]}); df
+
+ df['Counts'] = df.groupby(['Color']).transform(len)
+ df
+
`Shift groups of the values in a column based on the index
<http://stackoverflow.com/q/23198053/190597>`__
.. ipython:: python
df = pd.DataFrame(
- {u'line_race': [10L, 10L, 8L, 10L, 10L, 8L],
- u'beyer': [99L, 102L, 103L, 103L, 88L, 100L]},
+ {u'line_race': [10, 10, 8, 10, 10, 8],
+ u'beyer': [99, 102, 103, 103, 88, 100]},
index=[u'Last Gunfighter', u'Last Gunfighter', u'Last Gunfighter',
u'Paynter', u'Paynter', u'Paynter']); df
@@ -206,7 +594,7 @@ The :ref:`grouping <groupby>` docs.
df
Expanding Data
-~~~~~~~~~~~~~~
+**************
`Alignment and to-date
<http://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions>`__
@@ -218,35 +606,109 @@ Expanding Data
<http://stackoverflow.com/questions/15771472/pandas-rolling-mean-by-time-interval>`__
Splitting
-~~~~~~~~~
+*********
`Splitting a frame
<http://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992>`__
+Create a list of dataframes, split using a delineation based on logic included in rows.
+
+.. ipython:: python
+
+ df = pd.DataFrame(data={'Case' : ['A','A','A','B','A','A','B','A','A'],
+ 'Data' : np.random.randn(9)})
+
+ dfs = list(zip(*df.groupby(pd.rolling_median((1*(df['Case']=='B')).cumsum(),3,True))))[-1]
+
+ dfs[0]
+ dfs[1]
+ dfs[2]
+
.. _cookbook.pivot:
Pivot
-~~~~~
+*****
The :ref:`Pivot <reshaping.pivot>` docs.
`Partial sums and subtotals
<http://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875>`__
+.. ipython:: python
+
+ df = pd.DataFrame(data={'Province' : ['ON','QC','BC','AL','AL','MN','ON'],
+ 'City' : ['Toronto','Montreal','Vancouver','Calgary','Edmonton','Winnipeg','Windsor'],
+ 'Sales' : [13,6,16,8,4,3,1]})
+ table = pd.pivot_table(df,values=['Sales'],index=['Province'],columns=['City'],aggfunc=np.sum,margins=True)
+ table.stack('City')
+
`Frequency table like plyr in R
<http://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r>`__
+.. ipython:: python
+
+ grades = [48,99,75,80,42,80,72,68,36,78]
+ df = pd.DataFrame( {'ID': ["x%d" % r for r in range(10)],
+ 'Gender' : ['F', 'M', 'F', 'M', 'F', 'M', 'F', 'M', 'M', 'M'],
+ 'ExamYear': ['2007','2007','2007','2008','2008','2008','2008','2009','2009','2009'],
+ 'Class': ['algebra', 'stats', 'bio', 'algebra', 'algebra', 'stats', 'stats', 'algebra', 'bio', 'bio'],
+ 'Participated': ['yes','yes','yes','yes','no','yes','yes','yes','yes','yes'],
+ 'Passed': ['yes' if x > 50 else 'no' for x in grades],
+ 'Employed': [True,True,True,False,False,False,False,True,True,False],
+ 'Grade': grades})
+
+ df.groupby('ExamYear').agg({'Participated': lambda x: x.value_counts()['yes'],
+ 'Passed': lambda x: sum(x == 'yes'),
+ 'Employed' : lambda x : sum(x),
+ 'Grade' : lambda x : sum(x) / len(x)})
+
Apply
-~~~~~
+*****
-`Turning embedded lists into a multi-index frame
+`Rolling Apply to Organize - Turning embedded lists into a multi-index frame
<http://stackoverflow.com/questions/17349981/converting-pandas-dataframe-with-categorical-values-into-binary-values>`__
-`Rolling apply with a DataFrame returning a Series
+.. ipython:: python
+
+ df = pd.DataFrame(data={'A' : [[2,4,8,16],[100,200],[10,20,30]], 'B' : [['a','b','c'],['jj','kk'],['ccc']]},index=['I','II','III'])
+
+ def SeriesFromSubList(aList):
+ return pd.Series(aList)
+
+ df_orgz = pd.concat(dict([ (ind,row.apply(SeriesFromSubList)) for ind,row in df.iterrows() ]))
+
+`Rolling Apply with a DataFrame returning a Series
<http://stackoverflow.com/questions/19121854/using-rolling-apply-on-a-dataframe-object>`__
+Rolling Apply to multiple columns where function calculates a Series before a Scalar from the Series is returned
+
+.. ipython:: python
+
+ df = pd.DataFrame(data=np.random.randn(2000,2)/10000,
+ index=pd.date_range('2001-01-01',periods=2000),
+ columns=['A','B']); df
+
+ def gm(aDF,Const):
+ v = ((((aDF.A+aDF.B)+1).cumprod())-1)*Const
+ return (aDF.index[0],v.iloc[-1])
+
+ S = pd.Series(dict([ gm(df.iloc[i:min(i+51,len(df)-1)],5) for i in range(len(df)-50) ])); S
+
`Rolling apply with a DataFrame returning a Scalar
<http://stackoverflow.com/questions/21040766/python-pandas-rolling-apply-two-column-input-into-function/21045831#21045831>`__
+Rolling Apply to multiple columns where function returns a Scalar (Volume Weighted Average Price)
+
+.. ipython:: python
+
+ rng = pd.date_range(start = '2014-01-01',periods = 100)
+ df = pd.DataFrame({'Open' : np.random.randn(len(rng)),
+ 'Close' : np.random.randn(len(rng)),
+ 'Volume' : np.random.randint(100,2000,len(rng))}, index=rng); df
+
+ def vwap(bars): return ((bars.Close*bars.Volume).sum()/bars.Volume.sum()).round(2)
+ window = 5
+ s = pd.concat([ (pd.Series(vwap(df.iloc[i:i+window]), index=[df.index[i+window]])) for i in range(len(df)-window) ]); s
+
Timeseries
----------
@@ -279,7 +741,7 @@ Calculate the first day of the month for each entry in a DatetimeIndex
.. _cookbook.resample:
Resampling
-~~~~~~~~~~
+**********
The :ref:`Resample <timeseries.resampling>` docs.
@@ -310,12 +772,35 @@ Merge
The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` docs.
-`emulate R rbind
+`Append two dataframes with overlapping index (emulate R rbind)
<http://stackoverflow.com/questions/14988480/pandas-version-of-rbind>`__
-`Self Join
+.. ipython:: python
+
+ rng = pd.date_range('2000-01-01', periods=6)
+ df1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=['A', 'B', 'C'])
+ df2 = df1.copy()
+
+ignore_index is needed in pandas < v0.13, and depending on df construction
+
+.. ipython:: python
+
+ df = df1.append(df2,ignore_index=True); df
+
+`Self Join of a DataFrame
<https://github.com/pydata/pandas/issues/2996>`__
+.. ipython:: python
+
+ df = pd.DataFrame(data={'Area' : ['A'] * 5 + ['C'] * 2,
+ 'Bins' : [110] * 2 + [160] * 3 + [40] * 2,
+ 'Test_0' : [0, 1, 0, 1, 2, 0, 1],
+ 'Data' : np.random.randn(7)});df
+
+ df['Test_1'] = df['Test_0'] - 1
+
+ pd.merge(df, df, left_on=['Bins', 'Area','Test_0'], right_on=['Bins', 'Area','Test_1'],suffixes=('_L','_R'))
+
`How to set the index and join
<http://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join>`__
@@ -384,7 +869,7 @@ Data In/Out
.. _cookbook.csv:
CSV
-~~~
+***
The :ref:`CSV <io.read_csv_table>` docs
@@ -464,7 +949,7 @@ Parsing date components in multi-columns is faster with a format
.. _cookbook.sql:
SQL
-~~~
+***
The :ref:`SQL <io.sql>` docs
@@ -474,7 +959,7 @@ The :ref:`SQL <io.sql>` docs
.. _cookbook.excel:
Excel
-~~~~~
+*****
The :ref:`Excel <io.excel>` docs
@@ -489,7 +974,7 @@ header <http://stackoverflow.com/a/18939272/564538>`__
.. _cookbook.hdf:
HDFStore
-~~~~~~~~
+********
The :ref:`HDFStores <io.hdf5>` docs
@@ -544,8 +1029,8 @@ Storing Attributes to a group node
.. ipython:: python
- df = DataFrame(np.random.randn(8,3))
- store = HDFStore('test.h5')
+ df = pd.DataFrame(np.random.randn(8,3))
+ store = pd.HDFStore('test.h5')
store.put('df',df)
# you can store an arbitrary python object via pickle
@@ -562,7 +1047,7 @@ Storing Attributes to a group node
.. _cookbook.binary:
Binary Files
-~~~~~~~~~~~~
+************
pandas readily accepts numpy record arrays, if you need to read in a binary
file consisting of an array of C structs. For example, given this C program
@@ -606,9 +1091,6 @@ in the frame:
.. code-block:: python
- import numpy as np
- from pandas import DataFrame
-
names = 'count', 'avg', 'scale'
# note that the offsets are larger than the size of the type because of
@@ -617,7 +1099,7 @@ in the frame:
formats = 'i4', 'f8', 'f4'
dt = np.dtype({'names': names, 'offsets': offsets, 'formats': formats},
align=True)
- df = DataFrame(np.fromfile('binary.dat', dt))
+ df = pd.DataFrame(np.fromfile('binary.dat', dt))
.. note::
@@ -633,19 +1115,55 @@ Computation
`Numerical integration (sample-based) of a time series
<http://nbviewer.ipython.org/5720498>`__
-Miscellaneous
--------------
+Timedeltas
+----------
The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
-`Operating with timedeltas
+`Using timedeltas
<http://github.com/pydata/pandas/pull/2899>`__
-`Create timedeltas with date differences
+.. ipython:: python
+
+ s = pd.Series(pd.date_range('2012-1-1', periods=3, freq='D'))
+
+ s - s.max()
+
+ s.max() - s
+
+ s - datetime.datetime(2011,1,1,3,5)
+
+ s + datetime.timedelta(minutes=5)
+
+ datetime.datetime(2011,1,1,3,5) - s
+
+ datetime.timedelta(minutes=5) + s
+
+`Adding and subtracting deltas and dates
+<http://stackoverflow.com/questions/16385785/add-days-to-dates-in-dataframe>`__
+
+.. ipython:: python
+
+ deltas = pd.Series([ datetime.timedelta(days=i) for i in range(3) ])
+
+ df = pd.DataFrame(dict(A = s, B = deltas)); df
+
+ df['New Dates'] = df['A'] + df['B'];
+
+ df['Delta'] = df['A'] - df['New Dates']; df
+
+ df.dtypes
+
+`Another example
<http://stackoverflow.com/questions/15683588/iterating-through-a-pandas-dataframe>`__
+
+Values can be set to NaT using np.nan, similar to datetime
-`Adding days to dates in a dataframe
-<http://stackoverflow.com/questions/16385785/add-days-to-dates-in-dataframe>`__
+.. ipython:: python
+
+ y = s - s.shift(); y
+
+ y[1] = np.nan; y
Aliasing Axis Names
-------------------
@@ -659,19 +1177,15 @@ To globally provide aliases for axis names, one can define these 2 functions:
raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias))
cls._AXIS_ALIASES[alias] = axis
-.. ipython:: python
-
def clear_axis_alias(cls, axis, alias):
if axis not in cls._AXIS_NUMBERS:
raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias))
cls._AXIS_ALIASES.pop(alias,None)
-.. ipython:: python
-
- set_axis_alias(DataFrame,'columns', 'myaxis2')
- df2 = DataFrame(randn(3,2),columns=['c1','c2'],index=['i1','i2','i3'])
+ set_axis_alias(pd.DataFrame,'columns', 'myaxis2')
+ df2 = pd.DataFrame(np.random.randn(3,2),columns=['c1','c2'],index=['i1','i2','i3'])
df2.sum(axis='myaxis2')
- clear_axis_alias(DataFrame,'columns', 'myaxis2')
+ clear_axis_alias(pd.DataFrame,'columns', 'myaxis2')
Creating Example Data
---------------------
@@ -682,7 +1196,6 @@ of the data values:
.. ipython:: python
- import itertools
def expand_grid(data_dict):
rows = itertools.product(*data_dict.values())
@@ -693,4 +1206,4 @@ of the data values:
'weight': [100, 140, 180],
'sex': ['Male', 'Female']}
)
- df
+ df
\ No newline at end of file
| As per Joris's guidance, this PR is only partially completed. I fully intend to add more. It might be a week or two, possibly more, until i'm finished.
I'll squash it all into one commit when I think I'm done and ready for final review. Feel free to make suggestions as I go, but consider it all a draft until I squash.
Addresses #6918
| https://api.github.com/repos/pandas-dev/pandas/pulls/8288 | 2014-09-17T03:21:11Z | 2014-10-10T23:09:30Z | 2014-10-10T23:09:30Z | 2014-10-11T02:34:58Z |
CLN Pep8 on format.py | diff --git a/pandas/core/format.py b/pandas/core/format.py
index dc19c855dd857..fe5cbb7337aec 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1,5 +1,4 @@
-
-#coding: utf-8
+# -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable=W0141
@@ -115,7 +114,6 @@ def to_string(self):
return u('')
fmt_values = self._get_formatted_values()
- pad_space = 10
result = ['%s' % i for i in fmt_values]
if self.footer:
@@ -180,7 +178,6 @@ def _get_footer(self):
footer += "\n"
footer += level_info
-
return compat.text_type(footer)
def _get_formatted_index(self):
@@ -248,7 +245,8 @@ class TableFormatter(object):
@property
def should_show_dimensions(self):
- return self.show_dimensions is True or (self.show_dimensions == 'truncate' and self.is_truncated)
+ return self.show_dimensions is True or (self.show_dimensions == 'truncate' and
+ self.is_truncated)
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
@@ -333,7 +331,7 @@ def _chk_truncate(self):
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0)
- (w,h) = get_terminal_size()
+ (w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
@@ -346,14 +344,14 @@ def _chk_truncate(self):
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the screen
- if max_cols == 0 and len(self.frame.columns) > w:
+ if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
- if not hasattr(self,'max_rows_adj'):
+ if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
- if not hasattr(self,'max_cols_adj'):
+ if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
@@ -367,21 +365,21 @@ def _chk_truncate(self):
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
- frame = frame.iloc[:,:max_cols]
+ frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
- frame = concat( (frame.iloc[:,:col_num],frame.iloc[:,-col_num:]),axis=1 )
+ frame = concat((frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
- frame = frame.iloc[:max_rows,:]
+ frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
- frame = concat( (frame.iloc[:row_num,:],frame.iloc[-row_num:,:]) )
+ frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
@@ -413,7 +411,6 @@ def _to_str_columns(self):
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=max_colwidth)
-
max_len = max(np.max([_strlen(x) for x in fmt_values]),
max_colwidth)
if self.justify == 'left':
@@ -446,7 +443,7 @@ def _to_str_columns(self):
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
- for ix,col in enumerate(strcols):
+ for ix, col in enumerate(strcols):
cwidth = len(strcols[ix][row_num]) # infer from above row
is_dot_col = False
if truncate_h:
@@ -484,7 +481,7 @@ def to_string(self):
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print the whole frame
text = adjoin(1, *strcols)
- elif not isinstance(self.max_cols,int) or self.max_cols > 0: # perhaps need to wrap around
+ elif not isinstance(self.max_cols, int) or self.max_cols > 0: # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = adjoin(1, *strcols).split('\n')
@@ -494,7 +491,7 @@ def to_string(self):
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See `self._to_str_columns
size_tr_col = len(headers[self.tr_size_col])
- max_len += size_tr_col # Need to make space for largest row plus truncate (dot) col
+ max_len += size_tr_col # Need to make space for largest row plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
@@ -505,7 +502,7 @@ def to_string(self):
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
- adj_dif -= ( col_len + 1 ) # adjoin adds one
+ adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
@@ -562,7 +559,8 @@ def to_latex(self, column_format=None, longtable=False):
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
self.escape = self.kwds.get('escape', True)
- #TODO: column_format is not settable in df.to_latex
+
+ # TODO: column_format is not settable in df.to_latex
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
@@ -580,7 +578,6 @@ def get_col_type(dtype):
strcols = self._to_str_columns()
if self.index and isinstance(self.frame.index, MultiIndex):
- fmt = self._get_formatter('__index__')
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.columns.names)
@@ -595,7 +592,7 @@ def get_col_type(dtype):
column_format = ''.join(map(get_col_type, dtypes))
if self.index:
index_format = 'l' * self.frame.index.nlevels
- column_format = index_format + column_format
+ column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
@@ -623,7 +620,7 @@ def write(buf, frame, column_format, strcols, longtable=False):
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.escape:
- crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
+ crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
.replace('_', '\\_')
.replace('%', '\\%')
.replace('$', '\\$')
@@ -678,7 +675,7 @@ def to_html(self, classes=None):
raise TypeError('buf is not a file name and it has no write '
' method')
- def _get_formatted_column_labels(self,frame):
+ def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
def is_numeric_dtype(dtype):
@@ -692,15 +689,15 @@ def is_numeric_dtype(dtype):
dtypes = self.frame.dtypes.values
# if we have a Float level, they don't use leading space at all
- restrict_formatting = any([ l.is_floating for l in columns.levels ])
+ restrict_formatting = any([l.is_floating for l in columns.levels])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
- def space_format(x,y):
+ def space_format(x, y):
if y not in self.formatters and need_leadsp[x] and not restrict_formatting:
return ' ' + y
return y
- str_columns = list(zip(*[ [ space_format(x,y) for y in x ] for x in fmt_columns ]))
+ str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
@@ -788,7 +785,8 @@ def __init__(self, formatter, classes=None, max_rows=None, max_cols=None):
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
- self.is_truncated = self.max_rows < len(self.fmt.frame) or self.max_cols < len(self.fmt.columns)
+ self.is_truncated = (self.max_rows < len(self.fmt.frame) or
+ self.max_cols < len(self.fmt.columns))
def write(self, s, indent=0):
rs = com.pprint_thing(s)
@@ -922,7 +920,7 @@ def _column_header():
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
- for tag,span in list(records.items()):
+ for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
@@ -931,8 +929,8 @@ def _column_header():
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
else: # sparse col headers do not receive a ...
- values = values[:ins_col] + \
- (values[ins_col - 1],) + values[ins_col:]
+ values = (values[:ins_col] + (values[ins_col - 1],) +
+ values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers get ...
@@ -946,7 +944,7 @@ def _column_header():
records[ins_col] = 1
else:
recs_new = {}
- for tag,span in list(records.items()):
+ for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
@@ -1032,10 +1030,11 @@ def _write_regular_rows(self, fmt_values, indent):
else:
index_values = self.fmt.tr_frame.index.format()
+ row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
- str_sep_row = [ '...' for ele in row ]
+ str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta, tags=None,
nindex_levels=1)
@@ -1059,15 +1058,13 @@ def _write_hierarchical_rows(self, fmt_values, indent):
nrows = len(frame)
row_levels = self.frame.index.nlevels
- idx_values = frame.index.format(sparsify=False, adjoin=False,
- names=False)
+ idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
- levels = frame.index.format(sparsify=sentinel,
- adjoin=False, names=False)
+ levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
@@ -1075,28 +1072,28 @@ def _write_hierarchical_rows(self, fmt_values, indent):
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
- for lnum,records in enumerate(level_lengths):
+ for lnum, records in enumerate(level_lengths):
rec_new = {}
- for tag,span in list(records.items()):
+ for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
- idx_values.insert(ins_row,tuple(dot_row))
+ idx_values.insert(ins_row, tuple(dot_row))
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
- idx_values.insert(ins_row,tuple([u('...')]*len(level_lengths)))
+ idx_values.insert(ins_row, tuple([u('...')]*len(level_lengths)))
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
- fmt_values[ix_col].insert(ins_row,'...')
+ fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
@@ -1134,6 +1131,7 @@ def _write_hierarchical_rows(self, fmt_values, indent):
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
+
def _get_level_lengths(levels, sentinel=''):
from itertools import groupby
@@ -1387,7 +1385,7 @@ def save(self):
self.writer = csv.writer(f, **writer_kwargs)
if self.engine == 'python':
- # to be removed in 0.13
+ # to be removed in 0.13
self._helper_csv(self.writer, na_rep=self.na_rep,
float_format=self.float_format,
cols=self.cols, header=self.header,
@@ -1823,7 +1821,7 @@ def get_formatted_cells(self):
cell.val = self._format_value(cell.val)
yield cell
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# Array formatters
@@ -1992,10 +1990,10 @@ def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
self.date_format = date_format
def _format_strings(self):
- formatter = self.formatter or _get_format_datetime64_from_values(
- self.values,
- nat_rep=self.nat_rep,
- date_format=self.date_format)
+ formatter = (self.formatter or
+ _get_format_datetime64_from_values(self.values,
+ nat_rep=self.nat_rep,
+ date_format=self.date_format))
fmt_values = [formatter(x) for x in self.values]
@@ -2039,8 +2037,8 @@ def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(x,
- nat_rep=nat_rep,
- date_format=date_format)
+ nat_rep=nat_rep,
+ date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
@@ -2062,25 +2060,27 @@ def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
self.box = box
def _format_strings(self):
- formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep, box=self.box)
+ formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep,
+ box=self.box)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
- return a formatter function for a range of timedeltas. These will all have the same format argument
+ Return a formatter function for a range of timedeltas.
+ These will all have the same format argument
- if box, then show the return in quotes
+ If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
- one_day_in_nanos = (86400 * 1e9)
- even_days = np.logical_and(consider_values, values_int % one_day_in_nanos != 0).sum() == 0
- all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_in_nanos).sum() == 0
+ one_day_nanos = (86400 * 1e9)
+ even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
+ all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = 'even_day'
@@ -2102,6 +2102,7 @@ def _formatter(x):
return _formatter
+
def _make_fixed_width(strings, justify='right', minimum=None):
if len(strings) == 0 or justify == 'all':
return strings
@@ -2182,7 +2183,7 @@ def _has_names(index):
return index.name is not None
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
| While looking at making a separate PR in format.py, I decided to first clean up the file and make it PEP8 compliant. I used 99 maximum-line-length per http://legacy.python.org/dev/peps/pep-0008/#maximum-line-length.
I also noticed that line 965 (in master) has a list comprehension that iterates through a non-instantiated variable (**row**):
``` python
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = [ '...' for ele in row ]
self.write_tr(str_sep_row, indent, self.indent_delta, tags=None, nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
```
By the time it has gone through the loop once, **row** is set, but it really should be instantiated ahead of the loop to be safe.
Additionally, I thought it might be useful to add a PEP8 test in test_format, just to make sure that future commits don't break PEP8.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8286 | 2014-09-17T01:41:42Z | 2014-09-19T12:12:03Z | 2014-09-19T12:12:03Z | 2015-01-25T11:43:56Z |
BUG: make sure that the multi-index is lex-sorted before passing to _lexsort_indexer (GH8017) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index bd7a95a2ae0f4..8b467d768df8b 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -667,7 +667,6 @@ Enhancements
-- Bug in ``get`` where an ``IndexError`` would not cause the default value to be returned (:issue:`7725`)
@@ -745,10 +744,10 @@ Bug Fixes
- Bug in DataFrameGroupby.transform when transforming with a passed non-sorted key (:issue:`8046`)
- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`)
- Bug in inference in a MultiIndex with ``datetime.date`` inputs (:issue:`7888`)
-
+- Bug in ``get`` where an ``IndexError`` would not cause the default value to be returned (:issue:`7725`)
- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may reset nanosecond (:issue:`7697`)
- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may raise ``AttributeError`` if ``Timestamp`` has ``dateutil`` tzinfo (:issue:`7697`)
-
+- Bug in sorting a multi-index frame with a Float64Index (:issue:`8017`)
- Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index e8645e578c976..e023f79d9f3dd 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -625,10 +625,17 @@ def is_numeric_dtype(dtype):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes.values
+
+ # if we have a Float level, they don't use leading space at all
+ restrict_formatting = any([ l.is_floating for l in columns.levels ])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
- str_columns = list(zip(*[
- [' ' + y if y not in self.formatters and need_leadsp[x]
- else y for y in x] for x in fmt_columns]))
+
+ def space_format(x,y):
+ if y not in self.formatters and need_leadsp[x] and not restrict_formatting:
+ return ' ' + y
+ return y
+
+ str_columns = list(zip(*[ [ space_format(x,y) for y in x ] for x in fmt_columns ]))
if self.sparsify:
str_columns = _sparsify(str_columns)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index dd3d5c0e31196..939a94c033ea0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2770,6 +2770,12 @@ def trans(v):
na_position=na_position)
elif isinstance(labels, MultiIndex):
+
+ # make sure that the axis is lexsorted to start
+ # if not we need to reconstruct to get the correct indexer
+ if not labels.is_lexsorted():
+ labels = MultiIndex.from_tuples(labels.values)
+
indexer = _lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
indexer = com._ensure_platform_int(indexer)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ed47161b9a957..f4192e5761d7a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1628,6 +1628,7 @@ def sort_index(self, axis=0, ascending=True):
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
+
_shared_docs['reindex'] = """
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
@@ -3558,10 +3559,10 @@ def _tz_convert(ax, tz):
result = self._constructor(self._data, copy=copy)
result.set_axis(axis,ax)
return result.__finalize__(self)
-
+
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
- def tz_localize(self, tz, axis=0, level=None, copy=True,
+ def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
"""
Localize tz-naive TimeSeries to target time zone
@@ -3583,7 +3584,7 @@ def tz_localize(self, tz, axis=0, level=None, copy=True,
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
-
+
Returns
-------
"""
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 0bfa9be2feacf..2171b8e8428a4 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -214,6 +214,44 @@ def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEqual(result.index.names, self.frame.index.names)
+ def test_sorting_repr_8017(self):
+
+ np.random.seed(0)
+ data = np.random.randn(3,4)
+
+ for gen, extra in [([1.,3.,2.,5.],4.),
+ ([1,3,2,5],4),
+ ([Timestamp('20130101'),Timestamp('20130103'),Timestamp('20130102'),Timestamp('20130105')],Timestamp('20130104')),
+ (['1one','3one','2one','5one'],'4one')]:
+ columns = MultiIndex.from_tuples([('red', i) for i in gen])
+ df = DataFrame(data, index=list('def'), columns=columns)
+ df2 = pd.concat([df,DataFrame('world',
+ index=list('def'),
+ columns=MultiIndex.from_tuples([('red', extra)]))],axis=1)
+
+ # check that the repr is good
+ # make sure that we have a correct sparsified repr
+ # e.g. only 1 header of read
+ self.assertEqual(str(df2).splitlines()[0].split(),['red'])
+
+ # GH 8017
+ # sorting fails after columns added
+
+ # construct single-dtype then sort
+ result = df.copy().sort_index(axis=1)
+ expected = df.iloc[:,[0,2,1,3]]
+ assert_frame_equal(result, expected)
+
+ result = df2.sort_index(axis=1)
+ expected = df2.iloc[:,[0,2,1,4,3]]
+ assert_frame_equal(result, expected)
+
+ # setitem then sort
+ result = df.copy()
+ result[('red',extra)] = 'world'
+ result = result.sort_index(axis=1)
+ assert_frame_equal(result, expected)
+
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
| BUG: sparse repr of multi-index frame with a FloatIndex as a level was incorrect
closes #8017
| https://api.github.com/repos/pandas-dev/pandas/pulls/8282 | 2014-09-16T15:18:35Z | 2014-09-17T12:20:13Z | 2014-09-17T12:20:13Z | 2014-10-29T14:47:16Z |
ENH groupby: more user-friendly error message (GH7511) | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index b2218c10a284e..350f81069bb60 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1941,7 +1941,8 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
if getattr(self.grouper,'ndim', 1) != 1:
- raise ValueError("Grouper result with an ndim != 1")
+ t = self.name or str(type(self.grouper))
+ raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
| better error message for groupy on duplicated columns name. (Followup for branch GH8210 (Bug GH7511)). E.g.
```
ValueError: Grouper for 'A' not 1-dimensional
ValueError: Grouper for '<class '__main__.gr'>' not 1-dimensional
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/8281 | 2014-09-16T08:24:32Z | 2014-09-16T14:43:54Z | 2014-09-16T14:43:54Z | 2014-10-02T02:35:07Z |
API: add ddof to expanding/rolling_cov() | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index a8213a50a9ead..7c810cd3d526b 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -115,6 +115,13 @@ API changes
:func:`expanding_cov`, :func:`expanding_corr`, :func:`expanding_corr_pairwise`, and :func:`expanding_apply`,
as the results produced when ``center=True`` did not make much sense. (:issue:`7925`)
+- Added optional ``ddof`` argument to :func:`expanding_cov` and :func:`rolling_cov`.
+ The default value of ``1`` is backwards-compatible. (:issue:`8279`)
+
+- Documented the ``ddof`` argument to :func:`expanding_var`, :func:`expanding_std`,
+ :func:`rolling_var`, and :func:`rolling_std`. These functions' support of a
+ ``ddof`` argument (with a default value of ``1``) was previously undocumented. (:issue:`8064`)
+
- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcov`, and :func:`ewmcorr`
now interpret ``min_periods`` in the same manner that the ``rolling_*`` and ``expanding_*`` functions do:
a given result entry will be ``NaN`` if the (expanding, in this case) window does not contain
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 82423da26b53f..41a768783b1cb 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -167,6 +167,11 @@
elements, only complete pairwise observations will be used.
"""
+_ddof_kw = """ddof : int, default 1
+ Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of elements.
+"""
+
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
@@ -216,10 +221,10 @@ def rolling_count(arg, window, freq=None, center=False, how=None):
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
- _roll_kw%'None'+_pairwise_kw, _flex_retval, _roll_notes)
+ _roll_kw%'None'+_pairwise_kw+_ddof_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None,
- center=False, pairwise=None, how=None):
+ center=False, pairwise=None, how=None, ddof=1):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
@@ -233,7 +238,7 @@ def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None,
def _get_cov(X, Y):
mean = lambda x: rolling_mean(x, window, min_periods, center=center)
count = rolling_count(X + Y, window, center=center)
- bias_adj = count / (count - 1)
+ bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
rs = _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise))
return rs
@@ -620,14 +625,14 @@ def _use_window(minp, window):
return minp
-def _rolling_func(func, desc, check_minp=_use_window, how=None):
+def _rolling_func(func, desc, check_minp=_use_window, how=None, additional_kw=''):
if how is None:
how_arg_str = 'None'
else:
how_arg_str = "'%s"%how
- @Substitution(desc, _unary_arg, _roll_kw%how_arg_str, _type_of_input_retval,
- _roll_notes)
+ @Substitution(desc, _unary_arg, _roll_kw%how_arg_str + additional_kw,
+ _type_of_input_retval, _roll_notes)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, freq=None, center=False, how=how,
@@ -648,10 +653,12 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
how='median')
_ts_std = lambda *a, **kw: _zsqrt(algos.roll_var(*a, **kw))
-rolling_std = _rolling_func(_ts_std, 'Unbiased moving standard deviation.',
- check_minp=_require_min_periods(1))
-rolling_var = _rolling_func(algos.roll_var, 'Unbiased moving variance.',
- check_minp=_require_min_periods(1))
+rolling_std = _rolling_func(_ts_std, 'Moving standard deviation.',
+ check_minp=_require_min_periods(1),
+ additional_kw=_ddof_kw)
+rolling_var = _rolling_func(algos.roll_var, 'Moving variance.',
+ check_minp=_require_min_periods(1),
+ additional_kw=_ddof_kw)
rolling_skew = _rolling_func(algos.roll_skew, 'Unbiased moving skewness.',
check_minp=_require_min_periods(3))
rolling_kurt = _rolling_func(algos.roll_kurt, 'Unbiased moving kurtosis.',
@@ -864,8 +871,9 @@ def _pop_args(win_type, arg_names, kwargs):
return all_args
-def _expanding_func(func, desc, check_minp=_use_window):
- @Substitution(desc, _unary_arg, _expanding_kw, _type_of_input_retval, "")
+def _expanding_func(func, desc, check_minp=_use_window, additional_kw=''):
+ @Substitution(desc, _unary_arg, _expanding_kw + additional_kw,
+ _type_of_input_retval, "")
@Appender(_doc_template)
@wraps(func)
def f(arg, min_periods=1, freq=None, **kwargs):
@@ -883,20 +891,18 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
expanding_min = _expanding_func(algos.roll_min2, 'Expanding minimum.')
expanding_sum = _expanding_func(algos.roll_sum, 'Expanding sum.')
expanding_mean = _expanding_func(algos.roll_mean, 'Expanding mean.')
-expanding_median = _expanding_func(
- algos.roll_median_cython, 'Expanding median.')
-
-expanding_std = _expanding_func(_ts_std,
- 'Unbiased expanding standard deviation.',
- check_minp=_require_min_periods(1))
-expanding_var = _expanding_func(algos.roll_var, 'Unbiased expanding variance.',
- check_minp=_require_min_periods(1))
-expanding_skew = _expanding_func(
- algos.roll_skew, 'Unbiased expanding skewness.',
- check_minp=_require_min_periods(3))
-expanding_kurt = _expanding_func(
- algos.roll_kurt, 'Unbiased expanding kurtosis.',
- check_minp=_require_min_periods(4))
+expanding_median = _expanding_func(algos.roll_median_cython, 'Expanding median.')
+
+expanding_std = _expanding_func(_ts_std, 'Expanding standard deviation.',
+ check_minp=_require_min_periods(1),
+ additional_kw=_ddof_kw)
+expanding_var = _expanding_func(algos.roll_var, 'Expanding variance.',
+ check_minp=_require_min_periods(1),
+ additional_kw=_ddof_kw)
+expanding_skew = _expanding_func(algos.roll_skew, 'Unbiased expanding skewness.',
+ check_minp=_require_min_periods(3))
+expanding_kurt = _expanding_func(algos.roll_kurt, 'Unbiased expanding kurtosis.',
+ check_minp=_require_min_periods(4))
def expanding_count(arg, freq=None):
@@ -953,9 +959,9 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None):
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
- _expanding_kw+_pairwise_kw, _flex_retval, "")
+ _expanding_kw+_pairwise_kw+_ddof_kw, _flex_retval, "")
@Appender(_doc_template)
-def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
+def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None, ddof=1):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
@@ -966,7 +972,7 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_cov(arg1, arg2, window,
min_periods=min_periods, freq=freq,
- pairwise=pairwise)
+ pairwise=pairwise, ddof=ddof)
@Substitution("Expanding sample correlation.", _binary_arg_flex,
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 55c618646c4f5..94c2521ff6938 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -4,6 +4,7 @@
from datetime import datetime
from numpy.random import randn
+from numpy.testing.decorators import slow
import numpy as np
from distutils.version import LooseVersion
@@ -813,6 +814,7 @@ def _non_null_values(x):
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
+ @slow
def test_ewm_consistency(self):
def _weights(s, com, adjust, ignore_na):
@@ -877,6 +879,7 @@ def _ewma(s, com, min_periods, adjust, ignore_na):
cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
+ @slow
def test_expanding_consistency(self):
base_functions = [
(mom.expanding_count, lambda v: Series(v).count(), None),
@@ -931,7 +934,7 @@ def test_expanding_consistency(self):
cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
- cov_biased=None,
+ cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
)
@@ -967,6 +970,7 @@ def test_expanding_consistency(self):
expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
assert_panel_equal(expanding_f_result, expected)
+ @slow
def test_rolling_consistency(self):
base_functions = [
(mom.rolling_count, lambda v: Series(v).count(), None),
@@ -979,7 +983,7 @@ def test_rolling_consistency(self):
(mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.rolling_var, lambda v: Series(v).var(), 1),
#(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
- # (mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
+ #(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.rolling_median, lambda v: Series(v).median(), None),
@@ -1026,7 +1030,7 @@ def test_rolling_consistency(self):
cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
- cov_biased=None,
+ cov_biased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center, ddof=0),
var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
(mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
)
| Closes #8279
closes #8064
| https://api.github.com/repos/pandas-dev/pandas/pulls/8280 | 2014-09-16T03:02:07Z | 2014-09-17T18:03:54Z | 2014-09-17T18:03:54Z | 2014-09-17T18:07:20Z |
Eliminating contextmanager based transaction-handling | diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt
index baba82f588ed6..3a845f4ee0540 100644
--- a/ci/requirements-2.6.txt
+++ b/ci/requirements-2.6.txt
@@ -5,7 +5,7 @@ pytz==2013b
http://www.crummy.com/software/BeautifulSoup/bs4/download/4.2/beautifulsoup4-4.2.0.tar.gz
html5lib==1.0b2
numexpr==1.4.2
-sqlalchemy==0.7.4
+sqlalchemy==0.7.10
pymysql==0.6.0
psycopg2==2.5
scipy==0.11.0
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 83b96d5186dd2..513ac1241ffdb 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -19,6 +19,7 @@
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
+from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
@@ -637,13 +638,9 @@ def insert_data(self):
return column_names, data_list
- def get_session(self):
- con = self.pd_sql.engine.connect()
- return con.begin()
-
- def _execute_insert(self, trans, keys, data_iter):
+ def _execute_insert(self, conn, keys, data_iter):
data = [dict( (k, v) for k, v in zip(keys, row) ) for row in data_iter]
- trans.connection.execute(self.insert_statement(), data)
+ conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
@@ -653,7 +650,7 @@ def insert(self, chunksize=None):
chunksize = nrows
chunks = int(nrows / chunksize) + 1
- with self.get_session() as trans:
+ with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
@@ -661,7 +658,7 @@ def insert(self, chunksize=None):
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
- self._execute_insert(trans, keys, chunk_iter)
+ self._execute_insert(conn, keys, chunk_iter)
def read(self, coerce_float=True, parse_dates=None, columns=None):
@@ -884,6 +881,9 @@ def __init__(self, engine, schema=None, meta=None):
self.meta = meta
+ def run_transaction(self):
+ return self.engine.begin()
+
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy engine"""
return self.engine.execute(*args, **kwargs)
@@ -1017,9 +1017,9 @@ def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
- with self.get_session():
+ with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
- self.pd_sql.execute(stmt)
+ conn.execute(stmt)
def insert_statement(self):
names = list(map(str, self.frame.columns))
@@ -1038,12 +1038,9 @@ def insert_statement(self):
self.name, col_names, wildcards)
return insert_statement
- def get_session(self):
- return self.pd_sql.con
-
- def _execute_insert(self, trans, keys, data_iter):
+ def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
- trans.executemany(self.insert_statement(), data_list)
+ conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""Return a list of SQL statement that create a table reflecting the
@@ -1125,6 +1122,17 @@ def __init__(self, con, flavor, is_cursor=False):
else:
self.flavor = flavor
+ @contextmanager
+ def run_transaction(self):
+ cur = self.con.cursor()
+ try:
+ yield cur
+ self.con.commit()
+ except:
+ self.con.rollback()
+ finally:
+ cur.close()
+
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 80988ab2f5e1c..f02c701d97bcf 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -331,6 +331,28 @@ def _to_sql_save_index(self):
ix_cols = self._get_index_columns('test_to_sql_saves_index')
self.assertEqual(ix_cols, [['A',],])
+ def _transaction_test(self):
+ self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
+
+ ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
+
+ # Make sure when transaction is rolled back, no rows get inserted
+ try:
+ with self.pandasSQL.run_transaction() as trans:
+ trans.execute(ins_sql)
+ raise Exception('error')
+ except:
+ # ignore raised exception
+ pass
+ res = self.pandasSQL.read_sql('SELECT * FROM test_trans')
+ self.assertEqual(len(res), 0)
+
+ # Make sure when transaction is committed, rows do get inserted
+ with self.pandasSQL.run_transaction() as trans:
+ trans.execute(ins_sql)
+ res2 = self.pandasSQL.read_sql('SELECT * FROM test_trans')
+ self.assertEqual(len(res2), 1)
+
#------------------------------------------------------------------------------
#--- Testing the public API
@@ -1072,6 +1094,8 @@ def _get_index_columns(self, tbl_name):
def test_to_sql_save_index(self):
self._to_sql_save_index()
+ def test_transactions(self):
+ self._transaction_test()
class TestSQLiteAlchemy(_TestSQLAlchemy):
"""
@@ -1380,6 +1404,8 @@ def _get_index_columns(self, tbl_name):
def test_to_sql_save_index(self):
self._to_sql_save_index()
+ def test_transactions(self):
+ self._transaction_test()
class TestMySQLLegacy(TestSQLiteLegacy):
"""
| To close #8277
| https://api.github.com/repos/pandas-dev/pandas/pulls/8278 | 2014-09-16T02:04:25Z | 2014-09-17T09:06:27Z | 2014-09-17T09:06:27Z | 2014-09-17T09:06:48Z |
BUG: rolling_apply(..., center=True) should not append NaNs | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index bd7a95a2ae0f4..53ceab761f6a0 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -88,7 +88,8 @@ API changes
:func:`rolling_window`, and :func:`rolling_apply` with ``center=True`` previously would return a result of the same
structure as the input ``arg`` with ``NaN`` in the final ``(window-1)/2`` entries.
Now the final ``(window-1)/2`` entries of the result are calculated as if the input ``arg`` were followed
- by ``(window-1)/2`` ``NaN`` values. (:issue:`7925`)
+ by ``(window-1)/2`` ``NaN`` values (or with shrinking windows, in the case of :func:`rolling_apply`).
+ (:issue:`7925`, :issue:`8269`)
Prior behavior (note final value is ``NaN``):
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 77d8cea4de507..8f37d76e50f9c 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -1846,8 +1846,9 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int win,
return output
-def roll_generic(ndarray[float64_t, cast=True] input, int win,
- int minp, object func, object args, object kwargs):
+def roll_generic(ndarray[float64_t, cast=True] input,
+ int win, int minp, int offset,
+ object func, object args, object kwargs):
cdef ndarray[double_t] output, counts, bufarr
cdef Py_ssize_t i, n
cdef float64_t *buf
@@ -1856,37 +1857,41 @@ def roll_generic(ndarray[float64_t, cast=True] input, int win,
if not input.flags.c_contiguous:
input = input.copy('C')
- buf = <float64_t*> input.data
-
n = len(input)
if n == 0:
return input
minp = _check_minp(win, minp, n, floor=0)
output = np.empty(n, dtype=float)
- counts = roll_sum(np.isfinite(input).astype(float), win, minp)
+ counts = roll_sum(np.concatenate((np.isfinite(input).astype(float), np.array([0.] * offset))), win, minp)[offset:]
- bufarr = np.empty(win, dtype=float)
- oldbuf = <float64_t*> bufarr.data
-
- n = len(input)
- for i from 0 <= i < int_min(win, n):
+ # truncated windows at the beginning, through first full-length window
+ for i from 0 <= i < (int_min(win, n) - offset):
if counts[i] >= minp:
- output[i] = func(input[int_max(i - win + 1, 0) : i + 1], *args,
- **kwargs)
+ output[i] = func(input[0 : (i + offset + 1)], *args, **kwargs)
else:
output[i] = NaN
- for i from win <= i < n:
+ # remaining full-length windows
+ buf = <float64_t*> input.data
+ bufarr = np.empty(win, dtype=float)
+ oldbuf = <float64_t*> bufarr.data
+ for i from (win - offset) <= i < (n - offset):
buf = buf + 1
bufarr.data = <char*> buf
if counts[i] >= minp:
output[i] = func(bufarr, *args, **kwargs)
else:
output[i] = NaN
-
bufarr.data = <char*> oldbuf
+ # truncated windows at the end
+ for i from int_max(n - offset, 0) <= i < n:
+ if counts[i] >= minp:
+ output[i] = func(input[int_max(i + offset - win + 1, 0) : n], *args, **kwargs)
+ else:
+ output[i] = NaN
+
return output
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 49de02c23cc47..82423da26b53f 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -738,11 +738,12 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None,
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
+ offset = int((window - 1) / 2.) if center else 0
def call_cython(arg, window, minp, args, kwargs):
minp = _use_window(minp, window)
- return algos.roll_generic(arg, window, minp, func, args, kwargs)
+ return algos.roll_generic(arg, window, minp, offset, func, args, kwargs)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
- center=center, args=args, kwargs=kwargs)
+ center=False, args=args, kwargs=kwargs)
def rolling_window(arg, window=None, win_type=None, min_periods=None,
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 1d0be4ce48f4f..55c618646c4f5 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -5,6 +5,7 @@
from datetime import datetime
from numpy.random import randn
import numpy as np
+from distutils.version import LooseVersion
from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull
from pandas.util.testing import (
@@ -877,6 +878,45 @@ def _ewma(s, com, min_periods, adjust, ignore_na):
var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
def test_expanding_consistency(self):
+ base_functions = [
+ (mom.expanding_count, lambda v: Series(v).count(), None),
+ (mom.expanding_max, lambda v: Series(v).max(), None),
+ (mom.expanding_min, lambda v: Series(v).min(), None),
+ (mom.expanding_sum, lambda v: Series(v).sum(), None),
+ (mom.expanding_mean, lambda v: Series(v).mean(), None),
+ (mom.expanding_std, lambda v: Series(v).std(), 1),
+ (mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
+ (mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
+ (mom.expanding_var, lambda v: Series(v).var(), 1),
+ #(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
+ #(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
+ #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
+ # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
+ (mom.expanding_median, lambda v: Series(v).median(), None),
+ (mom.expanding_max, np.nanmax, 1),
+ (mom.expanding_min, np.nanmin, 1),
+ (mom.expanding_sum, np.nansum, 1),
+ ]
+ if np.__version__ >= LooseVersion('1.8.0'):
+ base_functions += [
+ (mom.expanding_mean, np.nanmean, 1),
+ (mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1),
+ (mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1),
+ ]
+ if np.__version__ >= LooseVersion('1.9.0'):
+ base_functions += [
+ (mom.expanding_median, np.nanmedian, 1),
+ ]
+ no_nan_functions = [
+ (mom.expanding_max, np.max, None),
+ (mom.expanding_min, np.min, None),
+ (mom.expanding_sum, np.sum, None),
+ (mom.expanding_mean, np.mean, None),
+ (mom.expanding_std, lambda v: np.std(v, ddof=1), 1),
+ (mom.expanding_var, lambda v: np.var(v, ddof=1), 1),
+ (mom.expanding_median, np.median, None),
+ ]
+
for min_periods in [0, 1, 2, 3, 4]:
# test consistency between different expanding_* moments
@@ -895,25 +935,15 @@ def test_expanding_consistency(self):
var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
)
- # test consistency between expanding_xyz() and expanding_apply of Series/DataFrame.xyz()
+ # test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
+ # or (b) expanding_apply of np.nanxyz()
for x in self._test_data():
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
- for (expanding_f, f, require_min_periods) in [
- (mom.expanding_count, lambda v: Series(v).count(), None),
- (mom.expanding_max, lambda v: Series(v).max(), None),
- (mom.expanding_min, lambda v: Series(v).min(), None),
- (mom.expanding_sum, lambda v: Series(v).sum(), None),
- (mom.expanding_mean, lambda v: Series(v).mean(), None),
- (mom.expanding_std, lambda v: Series(v).std(), 1),
- (mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
- (mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
- (mom.expanding_var, lambda v: Series(v).var(), 1),
- #(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
- #(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
- #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
- # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
- (mom.expanding_median, lambda v: Series(v).median(), None),
- ]:
+ functions = base_functions
+ # GH 8269
+ if x.notnull().all().all():
+ functions = base_functions + no_nan_functions
+ for (expanding_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
@@ -938,7 +968,46 @@ def test_expanding_consistency(self):
assert_panel_equal(expanding_f_result, expected)
def test_rolling_consistency(self):
- for window in [1, 3, 10, 20]:
+ base_functions = [
+ (mom.rolling_count, lambda v: Series(v).count(), None),
+ (mom.rolling_max, lambda v: Series(v).max(), None),
+ (mom.rolling_min, lambda v: Series(v).min(), None),
+ (mom.rolling_sum, lambda v: Series(v).sum(), None),
+ (mom.rolling_mean, lambda v: Series(v).mean(), None),
+ (mom.rolling_std, lambda v: Series(v).std(), 1),
+ (mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
+ (mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
+ (mom.rolling_var, lambda v: Series(v).var(), 1),
+ #(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
+ # (mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
+ #(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
+ # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
+ (mom.rolling_median, lambda v: Series(v).median(), None),
+ (mom.rolling_max, np.nanmax, 1),
+ (mom.rolling_min, np.nanmin, 1),
+ (mom.rolling_sum, np.nansum, 1),
+ ]
+ if np.__version__ >= LooseVersion('1.8.0'):
+ base_functions += [
+ (mom.rolling_mean, np.nanmean, 1),
+ (mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1),
+ (mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1),
+ ]
+ if np.__version__ >= LooseVersion('1.9.0'):
+ base_functions += [
+ (mom.rolling_median, np.nanmedian, 1),
+ ]
+ no_nan_functions = [
+ (mom.rolling_max, np.max, None),
+ (mom.rolling_min, np.min, None),
+ (mom.rolling_sum, np.sum, None),
+ (mom.rolling_mean, np.mean, None),
+ (mom.rolling_std, lambda v: np.std(v, ddof=1), 1),
+ (mom.rolling_var, lambda v: np.var(v, ddof=1), 1),
+ (mom.rolling_median, np.median, None),
+ ]
+
+ for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
continue
@@ -962,25 +1031,15 @@ def test_rolling_consistency(self):
(mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
)
- # test consistency between rolling_xyz and rolling_apply of Series/DataFrame.xyz
+ # test consistency between rolling_xyz() and either (a) rolling_apply of Series.xyz(),
+ # or (b) rolling_apply of np.nanxyz()
for x in self._test_data():
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
- for (rolling_f, f, require_min_periods) in [
- (mom.rolling_count, lambda v: Series(v).count(), None),
- (mom.rolling_max, lambda v: Series(v).max(), None),
- (mom.rolling_min, lambda v: Series(v).min(), None),
- (mom.rolling_sum, lambda v: Series(v).sum(), None),
- (mom.rolling_mean, lambda v: Series(v).mean(), None),
- (mom.rolling_std, lambda v: Series(v).std(), 1),
- (mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
- (mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
- (mom.rolling_var, lambda v: Series(v).var(), 1),
- #(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
- # (mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
- #(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
- # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
- (mom.rolling_median, lambda v: Series(v).median(), None),
- ]:
+ functions = base_functions
+ # GH 8269
+ if x.notnull().all().all():
+ functions = base_functions + no_nan_functions
+ for (rolling_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
| Closes #8269.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8275 | 2014-09-15T18:25:33Z | 2014-09-17T13:07:02Z | 2014-09-17T13:07:02Z | 2014-09-17T13:24:32Z |
TST: tests for reduction of series with nan timedelta (GH7661) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 1153daa183c97..fcec26f7ff7b7 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -479,7 +479,7 @@ TimedeltaIndex/Scalar
We introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes.
This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a nice-API box for the type. See the :ref:`docs <timedeltas.timedeltas>`.
-(:issue:`3009`, :issue:`4533`, :issue:`8209`, :issue:`8187`, :issue:`8190`, :issue:`7869`)
+(:issue:`3009`, :issue:`4533`, :issue:`8209`, :issue:`8187`, :issue:`8190`, :issue:`7869`, :issue:`7661`)
.. warning::
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 0b76d6247060d..8b9fee0e56172 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -2371,6 +2371,10 @@ def test_quantile(self):
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
+ # GH7661
+ result = Series([np.timedelta64('NaT')]).sum()
+ self.assertTrue(result is pd.NaT)
+
def test_quantile_multi(self):
from numpy import percentile
| xref #7661
just tests for coverage
| https://api.github.com/repos/pandas-dev/pandas/pulls/8268 | 2014-09-14T14:19:16Z | 2014-09-14T15:48:40Z | 2014-09-14T15:48:40Z | 2014-09-14T15:48:40Z |
Minor doc clean for ambiguous time handling | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index a23d067cefa4f..ce079920d4730 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1513,7 +1513,9 @@ Ambiguous Times when Localizing
In some cases, localize cannot determine the DST and non-DST hours when there are
duplicates. This often happens when reading files or database records that simply
duplicate the hours. Passing ``ambiguous='infer'`` (``infer_dst`` argument in prior
-releases) into ``tz_localize`` will attempt to determine the right offset.
+releases) into ``tz_localize`` will attempt to determine the right offset. Below
+the top example will fail as it contains ambiguous times and the bottom will
+infer the right offset.
.. ipython:: python
:okexcept:
@@ -1521,9 +1523,11 @@ releases) into ``tz_localize`` will attempt to determine the right offset.
rng_hourly = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
'11/06/2011 01:00', '11/06/2011 02:00',
'11/06/2011 03:00'])
- rng_hourly.tz_localize('US/Eastern')
+
+ # This will fail as there are ambiguous times
+ rng_hourly.tz_localize('US/Eastern')
rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
- rng_hourly_eastern.values
+ rng_hourly_eastern.tolist()
In addition to 'infer', there are several other arguments supported. Passing
an array-like of bools or 0s/1s where True represents a DST hour and False a
@@ -1536,8 +1540,8 @@ constructor as well as ``tz_localize``.
.. ipython:: python
rng_hourly_dst = np.array([1, 1, 0, 0, 0])
- rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).values
- rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').values
+ rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).tolist()
+ rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').tolist()
.. _timeseries.timedeltas:
| https://api.github.com/repos/pandas-dev/pandas/pulls/8267 | 2014-09-14T14:05:26Z | 2014-09-14T17:10:17Z | 2014-09-14T17:10:17Z | 2014-09-14T17:10:22Z | |
DOC: Add notes explaining params is db driver dep. GH7573 | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 462179b442ac0..e68a842c5dd27 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -341,7 +341,11 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
- List of parameters to pass to execute method.
+ List of parameters to pass to execute method. The syntax used
+ to pass parameters is database driver dependent. Check your
+ database driver documentation for which of the five syntax styles,
+ described in PEP 249's paramstyle, is supported.
+ Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
@@ -387,7 +391,11 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
- List of parameters to pass to execute method.
+ List of parameters to pass to execute method. The syntax used
+ to pass parameters is database driver dependent. Check your
+ database driver documentation for which of the five syntax styles,
+ described in PEP 249's paramstyle, is supported.
+ Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
| This is my first PR. It's just an update to two doc strings.
Closes #7573
| https://api.github.com/repos/pandas-dev/pandas/pulls/8266 | 2014-09-14T05:49:51Z | 2014-09-16T06:14:26Z | 2014-09-16T06:14:26Z | 2014-10-08T05:31:22Z |
BUG: Bug in inference in a MultiIndex with datetime.date inputs (GH7888) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 5a4f3b7da4843..8162740e63ee4 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -676,8 +676,8 @@ Enhancements
-- ``tz_localize`` now accepts the ``ambiguous`` keyword which allows for passing an array of bools
- indicating whether the date belongs in DST or not, 'NaT' for setting transition times to NaT,
+- ``tz_localize`` now accepts the ``ambiguous`` keyword which allows for passing an array of bools
+ indicating whether the date belongs in DST or not, 'NaT' for setting transition times to NaT,
'infer' for inferring DST/non-DST, and 'raise' (default) for an AmbiguousTimeError to be raised (:issue:`7943`).
See :ref:`the docs<timeseries.timezone_ambiguous>` for more details.
@@ -756,7 +756,7 @@ Bug Fixes
- Bug in HDFStore iteration when passing a where (:issue:`8014`)
- Bug in DataFrameGroupby.transform when transforming with a passed non-sorted key (:issue:`8046`)
- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`)
-
+- Bug in inference in a MultiIndex with ``datetime.date`` inputs (:issue:`7888`)
- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may reset nanosecond (:issue:`7697`)
- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may raise ``AttributeError`` if ``Timestamp`` has ``dateutil`` tzinfo (:issue:`7697`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 0d1876f213cc7..1e9f7d69c9341 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -232,7 +232,7 @@ def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False,
# which is fine, but since factorize does this correctly no need here
# this is an issue because _sanitize_array also coerces np.nan to a string
# under certain versions of numpy as well
- values = com._possibly_infer_to_datetimelike(values)
+ values = com._possibly_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 3695bc1898091..244dcbcde32dc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1961,15 +1961,24 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
return value
-def _possibly_infer_to_datetimelike(value):
- # we might have a array (or single object) that is datetime like,
- # and no dtype is passed don't change the value unless we find a
- # datetime/timedelta set
+def _possibly_infer_to_datetimelike(value, convert_dates=False):
+ """
+ we might have a array (or single object) that is datetime like,
+ and no dtype is passed don't change the value unless we find a
+ datetime/timedelta set
+
+ this is pretty strict in that a datetime/timedelta is REQUIRED
+ in addition to possible nulls/string likes
+
+ ONLY strings are NOT datetimelike
- # this is pretty strict in that a datetime/timedelta is REQUIRED
- # in addition to possible nulls/string likes
+ Parameters
+ ----------
+ convert_dates : boolean, default False
+ if True try really hard to convert dates (such as datetime.date), other
+ leave inferred dtype 'date' alone
- # ONLY strings are NOT datetimelike
+ """
v = value
if not is_list_like(v):
@@ -2011,7 +2020,7 @@ def _try_timedelta(v):
sample = v[:min(3,len(v))]
inferred_type = lib.infer_dtype(sample)
- if inferred_type in ['datetime', 'datetime64']:
+ if inferred_type in ['datetime', 'datetime64'] or (convert_dates and inferred_type in ['date']):
value = _try_datetime(v)
elif inferred_type in ['timedelta', 'timedelta64']:
value = _try_timedelta(v)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index a0c5d3ce5959a..0bfa9be2feacf 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1,12 +1,13 @@
# pylint: disable-msg=W0612,E1101,W0141
import datetime
+import itertools
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
-from pandas import Panel, DataFrame, Series, notnull, isnull
+from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
@@ -2066,6 +2067,17 @@ def test_datetimeindex(self):
self.assertTrue(idx.levels[0].equals(expected1))
self.assertTrue(idx.levels[1].equals(idx2))
+ # from datetime combos
+ # GH 7888
+ date1 = datetime.date.today()
+ date2 = datetime.datetime.today()
+ date3 = Timestamp.today()
+
+ for d1, d2 in itertools.product([date1,date2,date3],[date1,date2,date3]):
+ index = pd.MultiIndex.from_product([[d1],[d2]])
+ self.assertIsInstance(index.levels[0],pd.DatetimeIndex)
+ self.assertIsInstance(index.levels[1],pd.DatetimeIndex)
+
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame({'label':['a', 'a', 'a', 'b', 'b', 'b'],
| closes #7888
| https://api.github.com/repos/pandas-dev/pandas/pulls/8264 | 2014-09-13T23:18:33Z | 2014-09-14T01:09:42Z | 2014-09-14T01:09:42Z | 2014-09-14T01:09:42Z |
Clarification to .ix functionality | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index c458dac22acca..4d648ed3a5d88 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -96,14 +96,18 @@ of multi-axis indexing.
See more at :ref:`Selection by Position <indexing.integer>`
- ``.ix`` supports mixed integer and label based access. It is primarily label
- based, but will fall back to integer positional access. ``.ix`` is the most
- general and will support any of the inputs to ``.loc`` and ``.iloc``, as well
- as support for floating point label schemes. ``.ix`` is especially useful
- when dealing with mixed positional and label based hierarchical indexes.
- As using integer slices with ``.ix`` have different behavior depending on
- whether the slice is interpreted as position based or label based, it's
+ based, but will fall back to integer positional access unless the corresponding
+ row or column index is of integer type. ``.ix`` is the most general and will
+ support any of the inputs in ``.loc`` and ``.iloc`` with a notable exception that
+ ``.ix`` does not support positional access in the case when the corresponding
+ row or column index is of integer type. ``.ix`` also supports floating point
+ label schemes. ``.ix`` is exceptionally useful when dealing with mixed positional
+ and label based hierachical indexes.
+
+ When the DataFrame has an integer index or integer columns, positional access
+ is not supported in the corresponding dimension. Thus, in such cases it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
-
+
See more at :ref:`Advanced Indexing <advanced>`, :ref:`Advanced
Hierarchical <advanced.advanced_hierarchical>` and :ref:`Fallback Indexing
<advanced.fallback>`
| Made clear that .ix does not support positional indexing
in the case where the corresponding dimension has an
integer index.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8262 | 2014-09-13T14:13:18Z | 2014-09-13T22:42:12Z | 2014-09-13T22:42:12Z | 2015-01-17T09:59:00Z |
BUG: plot methods modified rcParams | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index fe1b1dcb90032..64b3427230eeb 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -794,6 +794,6 @@ needed interpolating (:issue:`7173`).
(:issue:`8230`).
- Bug with ``DatetimeIndex.asof`` incorrectly matching partial strings and
returning the wrong date (:issue:`8245`).
-
-
+- Bug in plotting methods modifying the global matplotlib
+rcParams (:issue:`8242`).
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index a7e3d39692d27..96bde2d222bca 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -479,6 +479,12 @@ def test_plot_figsize_and_title(self):
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
+ def test_dont_modify_rcParams(self):
+ # GH 8242
+ colors = self.plt.rcParams['axes.color_cycle']
+ Series([1, 2, 3]).plot()
+ self.assertEqual(colors, self.plt.rcParams['axes.color_cycle'])
+
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index a6930280e6421..190b178e37ad3 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -116,7 +116,10 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
colors = color
else:
if color_type == 'default':
- colors = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))
+ # need to call list() on the result to copy so we don't
+ # modify the global rcParams below
+ colors = list(plt.rcParams.get('axes.color_cycle',
+ list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
| Closes https://github.com/pydata/pandas/issues/8242
Just made a copy of the rcParams when we get it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8249 | 2014-09-12T00:22:20Z | 2014-09-12T17:19:35Z | 2014-09-12T17:19:35Z | 2016-11-03T12:38:12Z |
DOC: Correct Series.nonzero docstring | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2c166c231ae34..519e4c4457f04 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -331,11 +331,27 @@ def compress(self, condition, axis=0, out=None, **kwargs):
def nonzero(self):
"""
- return the a boolean array of the underlying data is nonzero
+ Return the indices of the elements that are non-zero
- See also
+ This method is equivalent to calling `numpy.nonzero` on the
+ series data. For compatability with NumPy, the return value is
+ the same (a tuple with an array of indices for each dimension),
+ but it will always be a one-item tuple because series only have
+ one dimension.
+
+ Examples
+ --------
+ >>> s = pd.Series([0, 3, 0, 4])
+ >>> s.nonzero()
+ (array([1, 3]),)
+ >>> s.iloc[s.nonzero()[0]]
+ 1 3
+ 3 4
+ dtype: int64
+
+ See Also
--------
- numpy.ndarray.nonzero
+ numpy.nonzero
"""
return self.values.nonzero()
| The function returns indices, not a boolean array. The description now
matches `numpy.nonzero`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8247 | 2014-09-11T19:44:14Z | 2014-09-12T14:52:16Z | 2014-09-12T14:52:16Z | 2014-09-12T14:54:49Z |
BUG: DatetimeIndex.asof matches partial dates | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 96d9b7c58c41a..3a3eac835f7e2 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -770,3 +770,8 @@ Bug Fixes
needed interpolating (:issue:`7173`).
- Bug where ``col_space`` was ignored in ``DataFrame.to_string()`` when ``header=False``
(:issue:`8230`).
+- Bug with ``DatetimeIndex.asof`` incorrectly matching partial strings and
+returning the wrong date (:issue:`8245`).
+
+
+
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 61fb3bffc55a4..961e488026731 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1052,6 +1052,9 @@ def asof(self, label):
if isinstance(label, (Index, ABCSeries, np.ndarray)):
raise TypeError('%s' % type(label))
+ if not isinstance(label, Timestamp):
+ label = Timestamp(label)
+
if label not in self:
loc = self.searchsorted(label, side='left')
if loc > 0:
@@ -1059,8 +1062,6 @@ def asof(self, label):
else:
return np.nan
- if not isinstance(label, Timestamp):
- label = Timestamp(label)
return label
def asof_locs(self, where, mask):
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 97149e8f224d3..8d0b54f2ef0b4 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -390,6 +390,12 @@ def test_asof(self):
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
+ def test_asof_datetime_partial(self):
+ idx = pd.date_range('2010-01-01', periods=2, freq='m')
+ expected = Timestamp('2010-01-31')
+ result = idx.asof('2010-02')
+ self.assertEqual(result, expected)
+
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
| Closes https://github.com/pydata/pandas/issues/8245
current master:
``` python
In [6]: idx.asof('2010-02')
Out[6]: Timestamp('2010-02-01 00:00:00')
```
fixed:
``` python
In [3]: idx.asof('2010-02')
Out[3]: Timestamp('2010-01-31 00:00:00', offset='M')
```
Will try to do a test_perf on this later. I've been working in python 3, but I don't think vbench is ported for that.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8246 | 2014-09-11T17:30:33Z | 2014-09-11T23:38:48Z | 2014-09-11T23:38:47Z | 2016-11-03T12:38:11Z |
BUG: normalize rolling_window() weights, remove scikits-timeseries dependency for testing | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index b8559eb51ece8..56dc551268a37 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -310,7 +310,7 @@ keyword. The list of recognized types are:
rolling_window(ser, 5, 'triang')
-Note that the ``boxcar`` window is equivalent to ``rolling_mean``:
+Note that the ``boxcar`` window is equivalent to ``rolling_mean``.
.. ipython:: python
@@ -336,6 +336,19 @@ This keyword is available in other rolling functions as well.
rolling_mean(ser, 5, center=True)
+.. _stats.moments.normalization
+
+.. note::
+
+ In rolling sum mode (``mean=False``) there is no normalization done to the
+ weights. Passing custom weights of ``[1, 1, 1]`` will yield a different
+ result than passing weights of ``[2, 2, 2]``, for example. When passing a
+ ``win_type`` instead of explicitly specifying the weights, the weights are
+ already normalized so that the largest weight is 1.
+
+ In contrast, the nature of the rolling mean calculation (``mean=True``)is
+ such that the weights are normalized with respect to each other. Weights
+ of ``[1, 1, 1]`` and ``[2, 2, 2]`` yield the same result.
.. _stats.moments.binary:
@@ -610,4 +623,4 @@ are scaled by debiasing factors
(For :math:`w_i = 1`, this reduces to the usual :math:`N / (N - 1)` factor,
with :math:`N = t + 1`.)
See http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
-for further details.
\ No newline at end of file
+for further details.
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index f49c919e80d50..73b8b7ddbcba3 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -383,6 +383,34 @@ Rolling/Expanding Moments API changes
rolling_sum(Series(range(4)), window=3, min_periods=0, center=True)
+- :func:`rolling_window` now normalizes the weights properly in rolling mean mode (`mean=True`) so that
+ the calculated weighted means (e.g. 'triang', 'gaussian') are distributed about the same means as those
+ calculated without weighting (i.e. 'boxcar'). See :ref:`the note on normalization
+ <stats.moments.normalization>` for further details. (:issue:`7618`)
+
+ .. ipython:: python
+
+ s = Series([10.5, 8.8, 11.4, 9.7, 9.3])
+
+ Behavior prior to 0.15.0:
+
+ .. code-block:: python
+
+ In [39]: rolling_window(s, window=3, win_type='triang', center=True)
+ Out[39]:
+ 0 NaN
+ 1 6.583333
+ 2 6.883333
+ 3 6.683333
+ 4 NaN
+ dtype: float64
+
+ New behavior
+
+ .. ipython:: python
+
+ rolling_window(s, window=3, win_type='triang', center=True)
+
- Removed ``center`` argument from :func:`expanding_max`, :func:`expanding_min`, :func:`expanding_sum`,
:func:`expanding_mean`, :func:`expanding_median`, :func:`expanding_std`, :func:`expanding_var`,
:func:`expanding_skew`, :func:`expanding_kurt`, :func:`expanding_quantile`, :func:`expanding_count`,
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 8f37d76e50f9c..316a282b71609 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -1897,7 +1897,7 @@ def roll_generic(ndarray[float64_t, cast=True] input,
def roll_window(ndarray[float64_t, ndim=1, cast=True] input,
ndarray[float64_t, ndim=1, cast=True] weights,
- int minp, bint avg=True, bint avg_wgt=False):
+ int minp, bint avg=True):
"""
Assume len(weights) << len(input)
"""
@@ -1915,7 +1915,7 @@ def roll_window(ndarray[float64_t, ndim=1, cast=True] input,
minp = _check_minp(len(weights), minp, in_n)
- if avg_wgt:
+ if avg:
for win_i from 0 <= win_i < win_n:
val_win = weights[win_i]
if val_win != val_win:
@@ -1956,8 +1956,6 @@ def roll_window(ndarray[float64_t, ndim=1, cast=True] input,
c = counts[in_i]
if c < minp:
output[in_i] = NaN
- elif avg:
- output[in_i] /= c
return output
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 94c2521ff6938..fab25f955fa76 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -65,47 +65,40 @@ def test_rolling_mean(self):
self._check_moment_func(mom.rolling_mean, np.mean)
def test_cmov_mean(self):
+ # GH 8238
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_mean
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
- vals = np.random.randn(10)
- xp = cmov_mean(vals, 5)
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49,
+ 16.68, 9.48, 10.63, 14.48])
+ xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
+ 12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_mean(vals, 5, center=True)
- assert_almost_equal(xp.compressed(), rs[2:-2])
- assert_almost_equal(xp.mask, np.isnan(rs))
+ assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_mean(Series(vals), 5, center=True)
assert_series_equal(xp, rs)
def test_cmov_window(self):
+ # GH 8238
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_window
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
- vals = np.random.randn(10)
- xp = cmov_window(vals, 5, 'boxcar')
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
+ 13.49, 16.68, 9.48, 10.63, 14.48])
+ xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
+ 12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
- assert_almost_equal(xp.compressed(), rs[2:-2])
- assert_almost_equal(xp.mask, np.isnan(rs))
+ assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_window(Series(vals), 5, 'boxcar', center=True)
assert_series_equal(xp, rs)
def test_cmov_window_corner(self):
+ # GH 8238
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_window
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
# all nan
vals = np.empty(10, dtype=float)
@@ -125,24 +118,37 @@ def test_cmov_window_corner(self):
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
+ # Gh 8238
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_window
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
+
+ vals = np.array([[ 12.18, 3.64],
+ [ 10.18, 9.16],
+ [ 13.24, 14.61],
+ [ 4.51, 8.11],
+ [ 6.15, 11.44],
+ [ 9.14, 6.21],
+ [ 11.31, 10.67],
+ [ 2.94, 6.51],
+ [ 9.42, 8.39],
+ [ 12.44, 7.34 ]])
+
+ xp = np.array([[ np.nan, np.nan],
+ [ np.nan, np.nan],
+ [ 9.252, 9.392],
+ [ 8.644, 9.906],
+ [ 8.87 , 10.208],
+ [ 6.81 , 8.588],
+ [ 7.792, 8.644],
+ [ 9.05 , 7.824],
+ [ np.nan, np.nan],
+ [ np.nan, np.nan]])
# DataFrame
- vals = np.random.randn(10, 2)
- xp = cmov_window(vals, 5, 'boxcar')
rs = mom.rolling_window(DataFrame(vals), 5, 'boxcar', center=True)
assert_frame_equal(DataFrame(xp), rs)
def test_cmov_window_na_min_periods(self):
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_window
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
# min_periods
vals = Series(np.random.randn(10))
@@ -155,39 +161,136 @@ def test_cmov_window_na_min_periods(self):
assert_series_equal(xp, rs)
def test_cmov_window_regular(self):
+ # GH 8238
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_window
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
+
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
+ 13.49, 16.68, 9.48, 10.63, 14.48])
+ xps = {
+ 'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009,
+ 14.03687, 13.8567, 11.81473, np.nan, np.nan],
+ 'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556,
+ 13.33889, 13.38, 12.33667, np.nan, np.nan],
+ 'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
+ 14.3675, 14.0825, 11.5675, np.nan, np.nan],
+ 'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559,
+ 14.17267, 14.65923, 11.10401, np.nan, np.nan],
+ 'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
+ 14.02156, 15.10512, 10.74574, np.nan, np.nan],
+ 'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671,
+ 14.03559, 15.05657, 10.78514, np.nan, np.nan],
+ 'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607,
+ 14.20036, 14.57726, 11.16988, np.nan, np.nan],
+ 'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
+ 14.3675, 14.0825, 11.5675, np.nan, np.nan]}
+
for wt in win_types:
- vals = np.random.randn(10)
- xp = cmov_window(vals, 5, wt)
+ xp = Series(xps[wt])
+ rs = mom.rolling_window(Series(vals), 5, wt, center=True)
+ assert_series_equal(xp, rs)
+
+ def test_cmov_window_regular_linear_range(self):
+ # GH 8238
+ tm._skip_if_no_scipy()
+ win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
+ 'blackmanharris', 'nuttall', 'barthann']
+
+ vals = np.array(range(10), dtype=np.float)
+ xp = vals.copy()
+ xp[:2] = np.nan
+ xp[-2:] = np.nan
+ xp = Series(xp)
+
+ for wt in win_types:
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
- assert_series_equal(Series(xp), rs)
+ assert_series_equal(xp, rs)
+
+ def test_cmov_window_regular_missing_data(self):
+ # GH 8238
+ tm._skip_if_no_scipy()
+
+ win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
+ 'blackmanharris', 'nuttall', 'barthann']
+
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
+ 13.49, 16.68, np.nan, 10.63, 14.48])
+ xps = {
+ 'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
+ 9.1925, 12.5575, 14.3675, 15.61667, 13.655],
+ 'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345,
+ 9.17869, 12.79607, 14.20036, 15.8706, 13.655],
+ 'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
+ 9.1925, 12.5575, 14.3675, 15.61667, 13.655],
+ 'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599,
+ 9.1764, 12.83559, 14.17267, 15.90976, 13.655],
+ 'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384,
+ 9.56348, 12.38009, 14.20565, 15.24694, 13.69758],
+ 'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618,
+ 9.16786, 13.02671, 14.03673, 16.08759, 13.65553],
+ 'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667,
+ 10.34667, 12.00556, 13.82125, 14.49429, 13.765],
+ 'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
+ 9.16438, 13.05052, 14.02175, 16.1098,
+ 13.65509]
+ }
+
+ for wt in win_types:
+ xp = Series(xps[wt])
+ rs = mom.rolling_window(Series(vals), 5, wt, min_periods=3)
+ assert_series_equal(xp, rs)
def test_cmov_window_special(self):
+ # GH 8238
tm._skip_if_no_scipy()
- try:
- from scikits.timeseries.lib import cmov_window
- except ImportError:
- raise nose.SkipTest("no scikits.timeseries")
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
+ 13.49, 16.68, 9.48, 10.63, 14.48])
+
+ xps = {
+ 'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763,
+ 13.89053, 13.65671, 12.01002, np.nan, np.nan],
+ 'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589,
+ 11.73161, 13.08516, 12.95111, 12.74577,
+ np.nan, np.nan],
+ 'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284,
+ 12.88331, 12.96079, 12.77008, np.nan, np.nan],
+ 'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161,
+ 12.75129, 12.90702, 12.83757, np.nan, np.nan]
+ }
+
for wt, k in zip(win_types, kwds):
- vals = np.random.randn(10)
- xp = cmov_window(vals, 5, (wt,) + tuple(k.values()))
+ xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
- assert_series_equal(Series(xp), rs)
+ assert_series_equal(xp, rs)
+
+ def test_cmov_window_special_linear_range(self):
+ # GH 8238
+ tm._skip_if_no_scipy()
+
+ win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
+ kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
+ {'width': 0.5}]
+
+ vals = np.array(range(10), dtype=np.float)
+ xp = vals.copy()
+ xp[:2] = np.nan
+ xp[-2:] = np.nan
+ xp = Series(xp)
+
+ for wt, k in zip(win_types, kwds):
+ rs = mom.rolling_window(Series(vals), 5, wt, center=True,
+ **k)
+ assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(mom.rolling_median, np.median)
diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index 3d793698c7caa..d3dbeef1af4d2 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -68,7 +68,6 @@ def show_versions(as_json=False):
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
- ("scikits.timeseries", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("bottleneck", lambda mod: mod.__version__),
| This closes #7618 and removes the dependency on scikits-timeseries for testing rolling_window functions. Scikits-timeseries has not been maintained since 2009, and I had difficulty simply installing it on my system. Instead of using scikits-timeseries to calculate the various rolling moments on randomly generated data, the tests use pre-calculated data.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8238 | 2014-09-10T21:53:49Z | 2014-09-23T13:04:18Z | 2014-09-23T13:04:18Z | 2014-09-23T14:34:58Z |
BUG: Bug in putting a PeriodIndex into a Series would convert to int64 | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index e880bb2d6b952..81c2dfd4311f9 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1125,7 +1125,7 @@ This enables nice expressions like this:
.. ipython:: python
# period
- s = Series(period_range('20130101',periods=4,freq='D').asobject)
+ s = Series(period_range('20130101',periods=4,freq='D'))
s
s.dt.year
s.dt.day
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 496230bd5f3a1..d26f0e56a9b57 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -102,7 +102,7 @@ API changes
2 6
3 NaN
dtype: float64
-
+
New behavior (note final value is ``5 = sum([2, 3, NaN])``):
.. ipython:: python
@@ -139,7 +139,7 @@ API changes
4 1.571429
5 2.189189
dtype: float64
-
+
New behavior (note values start at index ``4``, the location of the 2nd (since ``min_periods=2``) non-empty value):
.. ipython:: python
@@ -369,7 +369,7 @@ This enables nice expressions like this:
.. ipython:: python
# period
- s = Series(period_range('20130101',periods=4,freq='D').asobject)
+ s = Series(period_range('20130101',periods=4,freq='D'))
s
s.dt.year
s.dt.day
@@ -593,7 +593,7 @@ Bug Fixes
- Bug in Panel when using ``major_xs`` and ``copy=False`` is passed (deprecation warning fails because of missing ``warnings``) (:issue:`8152`).
- Bug in pickle deserialization that failed for pre-0.14.1 containers with dup items trying to avoid ambiguity
when matching block and manager items, when there's only one block there's no ambiguity (:issue:`7794`)
-
+- Bug in putting a ``PeriodIndex`` into a ``Series`` would convert to ``int64`` dtype, rather than ``object`` of ``Periods`` (:issue:`7932`)
- Bug in HDFStore iteration when passing a where (:issue:`8014`)
- Bug in DataFrameGroupby.transform when transforming with a passed non-sorted key (:issue:`8046`)
- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 4abb6ed10d6a7..ee9854f8dc5f9 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -196,8 +196,9 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
"""
from pandas.core.series import Series
from pandas.tools.tile import cut
+ from pandas.tseries.period import PeriodIndex
- is_period = getattr(values, 'inferred_type', None) == 'period'
+ is_period = com.is_period_arraylike(values)
values = Series(values).values
is_category = com.is_categorical_dtype(values.dtype)
@@ -215,6 +216,9 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
dtype = values.dtype
if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)) or is_period:
+ if is_period:
+ values = PeriodIndex(values)
+
values = values.view(np.int64)
keys, counts = htable.value_count_int64(values)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e3a0cf14cfbc1..692388e60065a 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2325,6 +2325,13 @@ def is_iterator(obj):
def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
+def is_period_arraylike(arr):
+ """ return if we are period arraylike / PeriodIndex """
+ if isinstance(arr, pd.PeriodIndex):
+ return True
+ elif isinstance(arr, (np.ndarray, ABCSeries)):
+ return arr.dtype == object and lib.infer_dtype(arr) == 'period'
+ return getattr(arr, 'inferred_type', None) == 'period'
def _coerce_to_dtype(dtype):
""" coerce a string / np.dtype to a dtype """
@@ -2380,7 +2387,6 @@ def is_datetime64_ns_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype)
return tipo == _NS_DTYPE
-
def is_timedelta64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.timedelta64)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index da4dc904c71e3..64a83ce3c3137 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -110,7 +110,7 @@ def get_expected(s, name):
tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))
# periodindex
- for s in [Series(period_range('20130101',periods=5,freq='D').asobject)]:
+ for s in [Series(period_range('20130101',periods=5,freq='D'))]:
for prop in ok_for_period:
tm.assert_series_equal(getattr(s.dt,prop),get_expected(s,prop))
@@ -747,6 +747,15 @@ def test_constructor_dtype_datetime64(self):
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
+ def test_constructor_periodindex(self):
+ # GH7932
+ # converting a PeriodIndex when put in a Series
+
+ pi = period_range('20130101',periods=5,freq='D')
+ s = Series(pi)
+ expected = Series(pi.asobject)
+ assert_series_equal(s, expected)
+
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index 92ccd1248fac9..d3b86d73dca3a 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -39,15 +39,9 @@ def maybe_to_datetimelike(data, copy=False):
if issubclass(data.dtype.type, np.datetime64):
return DatetimeProperties(DatetimeIndex(data, copy=copy), index)
else:
-
- if isinstance(data, PeriodIndex):
+ if com.is_period_arraylike(data):
return PeriodProperties(PeriodIndex(data, copy=copy), index)
- data = com._values_from_object(data)
- inferred = lib.infer_dtype(data)
- if inferred == 'period':
- return PeriodProperties(PeriodIndex(data), index)
-
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
class Properties(PandasDelegate):
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index edc7b075da6f8..873d24530d1d9 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -664,9 +664,10 @@ def infer_freq(index, warn=True):
if not (com.is_datetime64_dtype(index.values) or values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype on a Series of {0}".format(index.dtype))
index = values
- if isinstance(index, pd.PeriodIndex):
+
+ if com.is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
- "instead of using infer_freq.")
+ "instead of using infer_freq.")
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index dfea3e0486d32..942a2f445fd48 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -746,6 +746,10 @@ def __contains__(self, key):
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
+ def _to_embed(self, keep_tz=False):
+ """ return an array repr of this object, potentially casting to object """
+ return self.asobject.values
+
def asof_locs(self, where, mask):
"""
where : array of timestamps
| closes #7932
| https://api.github.com/repos/pandas-dev/pandas/pulls/8237 | 2014-09-10T16:14:58Z | 2014-09-10T18:09:27Z | 2014-09-10T18:09:27Z | 2014-09-10T18:09:28Z |
BUG: interpolate with no nans and limit | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 496230bd5f3a1..029a38507275d 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -756,3 +756,8 @@ Bug Fixes
- Bug with kde plot and NaNs (:issue:`8182`)
- Bug in ``GroupBy.count`` with float32 data type were nan values were not excluded (:issue:`8169`).
- Bug with stacked barplots and NaNs (:issue:`8175`).
+
+
+
+- Bug in interpolation methods with the ``limit`` keyword when no values
+needed interpolating (:issue:`7173`).
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e3a0cf14cfbc1..c0b6d9dbe15b5 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1534,6 +1534,8 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
def _interp_limit(invalid, limit):
"""mask off values that won't be filled since they exceed the limit"""
all_nans = np.where(invalid)[0]
+ if all_nans.size == 0: # no nans anyway
+ return []
violate = [invalid[x:x + limit + 1] for x in all_nans]
violate = np.array([x.all() & (x.size > limit) for x in violate])
return all_nans[violate] + limit
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index b1877ddb81ee1..2c5f100843445 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -656,6 +656,13 @@ def test_interp_datetime64(self):
expected = Series([1., 1., 3.], index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
+ def test_interp_limit_no_nans(self):
+ # GH 7173
+ s = pd.Series([1., 2., 3.])
+ result = s.interpolate(limit=1)
+ expected = s
+ assert_series_equal(result, expected)
+
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
| Closes https://github.com/pydata/pandas/issues/7173
I was assuming that there would be at least one NaN when I was adjusting for the limits. Pretty simple fix.
Here's the intended behavior:
``` python
In [1]: s = pd.Series([1., 2, 3])
In [2]: s.interpolate()
Out[2]:
0 1
1 2
2 3
dtype: float64
In [3]: s.interpolate(limit=1)
Out[3]:
0 1
1 2
2 3
dtype: float64
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/8235 | 2014-09-10T15:21:35Z | 2014-09-10T17:45:32Z | 2014-09-10T17:45:32Z | 2016-11-03T12:38:10Z |
Fix to merge code from `_create_table_statement` and `get_schema` | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 462179b442ac0..83b96d5186dd2 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -552,33 +552,19 @@ class PandasSQLTable(PandasObject):
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
- schema=None):
+ schema=None, keys=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
+ self.if_exists = if_exists
+ self.keys = keys
if frame is not None:
- # We want to write a frame
- if self.pd_sql.has_table(self.name, self.schema):
- if if_exists == 'fail':
- raise ValueError("Table '%s' already exists." % name)
- elif if_exists == 'replace':
- self.pd_sql.drop_table(self.name, self.schema)
- self.table = self._create_table_setup()
- self.create()
- elif if_exists == 'append':
- self.table = self.pd_sql.get_table(self.name, self.schema)
- if self.table is None:
- self.table = self._create_table_setup()
- else:
- raise ValueError(
- "'{0}' is not valid for if_exists".format(if_exists))
- else:
- self.table = self._create_table_setup()
- self.create()
+ # We want to initialize based on a dataframe
+ self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
@@ -593,9 +579,26 @@ def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table))
- def create(self):
+ def _execute_create(self):
+ # Inserting table into database, add to MetaData object
+ self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
+ def create(self):
+ if self.exists():
+ if self.if_exists == 'fail':
+ raise ValueError("Table '%s' already exists." % self.name)
+ elif self.if_exists == 'replace':
+ self.pd_sql.drop_table(self.name, self.schema)
+ self._execute_create()
+ elif self.if_exists == 'append':
+ pass
+ else:
+ raise ValueError(
+ "'{0}' is not valid for if_exists".format(self.if_exists))
+ else:
+ self._execute_create()
+
def insert_statement(self):
return self.table.insert()
@@ -634,9 +637,15 @@ def insert_data(self):
return column_names, data_list
- def insert(self, chunksize=None):
+ def get_session(self):
+ con = self.pd_sql.engine.connect()
+ return con.begin()
- ins = self.insert_statement()
+ def _execute_insert(self, trans, keys, data_iter):
+ data = [dict( (k, v) for k, v in zip(keys, row) ) for row in data_iter]
+ trans.connection.execute(self.insert_statement(), data)
+
+ def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
@@ -644,18 +653,15 @@ def insert(self, chunksize=None):
chunksize = nrows
chunks = int(nrows / chunksize) + 1
- con = self.pd_sql.engine.connect()
- with con.begin() as trans:
+ with self.get_session() as trans:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
- chunk_list = [arr[start_i:end_i] for arr in data_list]
- insert_list = [dict((k, v) for k, v in zip(keys, row))
- for row in zip(*chunk_list)]
- con.execute(ins, insert_list)
+ chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
+ self._execute_insert(trans, keys, chunk_iter)
def read(self, coerce_float=True, parse_dates=None, columns=None):
@@ -729,7 +735,7 @@ def _get_column_names_and_types(self, dtype_mapper):
return column_names_and_types
def _create_table_setup(self):
- from sqlalchemy import Table, Column
+ from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
@@ -737,7 +743,19 @@ def _create_table_setup(self):
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
- return Table(self.name, self.pd_sql.meta, *columns, schema=self.schema)
+ if self.keys is not None:
+ columns.append(PrimaryKeyConstraint(self.keys,
+ name=self.name+'_pk'))
+
+
+ schema = self.schema or self.pd_sql.meta.schema
+
+ # At this point, attach to new metadata, only attach to self.meta
+ # once table is created.
+ from sqlalchemy.schema import MetaData
+ meta = MetaData(self.pd_sql, schema=schema)
+
+ return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
""" Make a data_frame's column type align with an sql_table
@@ -872,7 +890,6 @@ def execute(self, *args, **kwargs):
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None):
-
table = PandasSQLTable(
table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
@@ -901,6 +918,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
table = PandasSQLTable(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label, schema=schema)
+ table.create()
table.insert(chunksize)
# check for potentially case sensitivity issues (GH7815)
if name not in self.engine.table_names(schema=schema or self.meta.schema):
@@ -930,8 +948,9 @@ def drop_table(self, table_name, schema=None):
self.get_table(table_name, schema).drop()
self.meta.clear()
- def _create_sql_schema(self, frame, table_name):
- table = PandasSQLTable(table_name, self, frame=frame)
+ def _create_sql_schema(self, frame, table_name, keys=None):
+ table = PandasSQLTable(table_name, self, frame=frame, index=False,
+ keys=keys)
return str(table.sql_schema())
@@ -997,8 +1016,8 @@ class PandasSQLTableLegacy(PandasSQLTable):
def sql_schema(self):
return str(";\n".join(self.table))
- def create(self):
- with self.pd_sql.con:
+ def _execute_create(self):
+ with self.get_session():
for stmt in self.table:
self.pd_sql.execute(stmt)
@@ -1019,28 +1038,12 @@ def insert_statement(self):
self.name, col_names, wildcards)
return insert_statement
- def insert(self, chunksize=None):
-
- ins = self.insert_statement()
- keys, data_list = self.insert_data()
-
- nrows = len(self.frame)
- if chunksize is None:
- chunksize = nrows
- chunks = int(nrows / chunksize) + 1
+ def get_session(self):
+ return self.pd_sql.con
- with self.pd_sql.con:
- for i in range(chunks):
- start_i = i * chunksize
- end_i = min((i + 1) * chunksize, nrows)
- if start_i >= end_i:
- break
- chunk_list = [arr[start_i:end_i] for arr in data_list]
- insert_list = [tuple((v for v in row))
- for row in zip(*chunk_list)]
- cur = self.pd_sql.con.cursor()
- cur.executemany(ins, insert_list)
- cur.close()
+ def _execute_insert(self, trans, keys, data_iter):
+ data_list = list(data_iter)
+ trans.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""Return a list of SQL statement that create a table reflecting the
@@ -1061,21 +1064,25 @@ def _create_table_setup(self):
br_l = _SQL_SYMB[flv]['br_l'] # left val quote char
br_r = _SQL_SYMB[flv]['br_r'] # right val quote char
- col_template = br_l + '%s' + br_r + ' %s'
-
- columns = ',\n '.join(col_template % (cname, ctype)
- for cname, ctype, _ in column_names_and_types)
- template = """CREATE TABLE %(name)s (
- %(columns)s
- )"""
- create_stmts = [template % {'name': self.name, 'columns': columns}, ]
-
- ix_tpl = "CREATE INDEX ix_{tbl}_{col} ON {tbl} ({br_l}{col}{br_r})"
- for cname, _, is_index in column_names_and_types:
- if not is_index:
- continue
- create_stmts.append(ix_tpl.format(tbl=self.name, col=cname,
- br_l=br_l, br_r=br_r))
+ create_tbl_stmts = [(br_l + '%s' + br_r + ' %s') % (cname, ctype)
+ for cname, ctype, _ in column_names_and_types]
+ if self.keys is not None and len(self.keys):
+ cnames_br = ",".join([br_l + c + br_r for c in self.keys])
+ create_tbl_stmts.append(
+ "CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
+ tbl=self.name, cnames_br=cnames_br))
+
+ create_stmts = ["CREATE TABLE " + self.name + " (\n" +
+ ',\n '.join(create_tbl_stmts) + "\n)"]
+
+ ix_cols = [cname for cname, _, is_index in column_names_and_types
+ if is_index]
+ if len(ix_cols):
+ cnames = "_".join(ix_cols)
+ cnames_br = ",".join([br_l + c + br_r for c in ix_cols])
+ create_stmts.append(
+ "CREATE INDEX ix_{tbl}_{cnames} ON {tbl} ({cnames_br})".format(
+ tbl=self.name, cnames=cnames, cnames_br=cnames_br))
return create_stmts
@@ -1172,16 +1179,28 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
----------
frame: DataFrame
name: name of SQL table
- flavor: {'sqlite', 'mysql'}, default 'sqlite'
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
+ index : boolean, default True
+ Write DataFrame index as a column
+ index_label : string or sequence, default None
+ Column label for index column(s). If None is given (default) and
+ `index` is True, then the index names are used.
+ A sequence should be given if the DataFrame uses MultiIndex.
+ schema : string, default None
+ Ignored parameter included for compatability with SQLAlchemy version
+ of `to_sql`.
+ chunksize : int, default None
+ If not None, then rows will be written in batches of this size at a
+ time. If None, all rows will be written at once.
"""
table = PandasSQLTableLegacy(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
+ table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
@@ -1200,8 +1219,9 @@ def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % name
self.execute(drop_sql)
- def _create_sql_schema(self, frame, table_name):
- table = PandasSQLTableLegacy(table_name, self, frame=frame)
+ def _create_sql_schema(self, frame, table_name, keys=None):
+ table = PandasSQLTableLegacy(table_name, self, frame=frame, index=False,
+ keys=keys)
return str(table.sql_schema())
@@ -1227,58 +1247,8 @@ def get_schema(frame, name, flavor='sqlite', keys=None, con=None):
"""
- if con is None:
- if flavor == 'mysql':
- warnings.warn(_MYSQL_WARNING, FutureWarning)
- return _get_schema_legacy(frame, name, flavor, keys)
-
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
- return pandas_sql._create_sql_schema(frame, name)
-
-
-def _get_schema_legacy(frame, name, flavor, keys=None):
- """Old function from 0.13.1. To keep backwards compatibility.
- When mysql legacy support is dropped, it should be possible to
- remove this code
- """
-
- def get_sqltype(dtype, flavor):
- pytype = dtype.type
- pytype_name = "text"
- if issubclass(pytype, np.floating):
- pytype_name = "float"
- elif issubclass(pytype, np.integer):
- pytype_name = "int"
- elif issubclass(pytype, np.datetime64) or pytype is datetime:
- # Caution: np.datetime64 is also a subclass of np.number.
- pytype_name = "datetime"
- elif pytype is datetime.date:
- pytype_name = "date"
- elif issubclass(pytype, np.bool_):
- pytype_name = "bool"
-
- return _SQL_TYPES[pytype_name][flavor]
-
- lookup_type = lambda dtype: get_sqltype(dtype, flavor)
-
- column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes))
- if flavor == 'sqlite':
- columns = ',\n '.join('[%s] %s' % x for x in column_types)
- else:
- columns = ',\n '.join('`%s` %s' % x for x in column_types)
-
- keystr = ''
- if keys is not None:
- if isinstance(keys, string_types):
- keys = (keys,)
- keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
- template = """CREATE TABLE %(name)s (
- %(columns)s
- %(keystr)s
- );"""
- create_statement = template % {'name': name, 'columns': columns,
- 'keystr': keystr}
- return create_statement
+ return pandas_sql._create_sql_schema(frame, name, keys=keys)
# legacy names, with depreciation warnings and copied docs
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 4a4b9da619b5f..80988ab2f5e1c 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -1449,6 +1449,15 @@ def _get_index_columns(self, tbl_name):
def test_to_sql_save_index(self):
self._to_sql_save_index()
+ for ix_name, ix_col in zip(ixs.Key_name, ixs.Column_name):
+ if ix_name not in ix_cols:
+ ix_cols[ix_name] = []
+ ix_cols[ix_name].append(ix_col)
+ return ix_cols.values()
+
+ def test_to_sql_save_index(self):
+ self._to_sql_save_index()
+
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
@@ -1545,7 +1554,7 @@ def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
lines = create_sql.splitlines()
- self.assertTrue('PRIMARY KEY (A,B)' in create_sql)
+ self.assertTrue('PRIMARY KEY ([A],[B])' in create_sql)
cur = self.db.cursor()
cur.execute(create_sql)
@@ -1824,7 +1833,7 @@ def test_schema(self):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
lines = create_sql.splitlines()
- self.assertTrue('PRIMARY KEY (A,B)' in create_sql)
+ self.assertTrue('PRIMARY KEY (`A`,`B`)' in create_sql)
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
| Closes #8097
Reorganized a lot of the SQL schema creation code. Now database table is not dropped / create when `PandasSQLTable` object is instantiated, but instead when `insert` method is called on it. This allow us to `get_schema` to generate `CREATE TABLE` code without actually creating the table.
(Includes updates from #8083, so will have to be rebased when that is merged)
| https://api.github.com/repos/pandas-dev/pandas/pulls/8232 | 2014-09-10T04:16:17Z | 2014-09-15T21:43:01Z | 2014-09-15T21:43:01Z | 2014-09-15T21:50:33Z |
API: raise on setops for + and - for Indexes (GH8226) | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 04aa07a49ba8a..4bde90a402456 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1616,28 +1616,33 @@ display:
df
df['A']
+.. _indexing.setops:
Set operations on Index objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. warning::
+
+ In 0.15.0. the set operations ``+`` and ``-`` were deprecated in order to provide these for numeric type operations on certain
+ index types. ``+`` can be replace by ``.union()`` or ``|``, and ``-`` by ``.difference()``.
+
.. _indexing.set_ops:
-The three main operations are ``union (|)``, ``intersection (&)``, and ``diff
-(-)``. These can be directly called as instance methods or used via overloaded
-operators:
+The two main operations are ``union (|)``, ``intersection (&)``
+These can be directly called as instance methods or used via overloaded
+operators. Difference is provided via the ``.difference()`` method.
.. ipython:: python
a = Index(['c', 'b', 'a'])
b = Index(['c', 'e', 'd'])
- a.union(b)
a | b
a & b
- a - b
+ a.difference(b)
Also available is the ``sym_diff (^)`` operation, which returns elements
that appear in either ``idx1`` or ``idx2`` but not both. This is
-equivalent to the Index created by ``(idx1 - idx2) + (idx2 - idx1)``,
+equivalent to the Index created by ``(idx1.difference(idx2)).union(idx2.difference(idx1))``,
with duplicates dropped.
.. ipython:: python
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index f0f52d373a157..96d9b7c58c41a 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -19,6 +19,7 @@ users upgrade to this version.
- Internal refactoring of the ``Index`` class to no longer sub-class ``ndarray``, see :ref:`Internal Refactoring <whatsnew_0150.refactoring>`
- New datetimelike properties accessor ``.dt`` for Series, see :ref:`Datetimelike Properties <whatsnew_0150.dt>`
- dropping support for ``PyTables`` less than version 3.0.0, and ``numexpr`` less than version 2.1 (:issue:`7990`)
+ - API change in using Indexs set operations, see :ref:`here <whatsnew_0150.index_set_ops>`
- :ref:`Other Enhancements <whatsnew_0150.enhancements>`
@@ -343,6 +344,11 @@ API changes
- ``Series.to_csv()`` now returns a string when ``path=None``, matching the behaviour of
``DataFrame.to_csv()`` (:issue:`8215`).
+
+.. _whatsnew_0150.index_set_ops:
+
+- The Index set operations ``+`` and ``-`` were deprecated in order to provide these for numeric type operations on certain index types. ``+`` can be replace by ``.union()`` or ``|``, and ``-`` by ``.difference()``. Further the method name ``Index.diff()`` is deprecated and can be replaced by ``Index.difference()``
+
.. _whatsnew_0150.dt:
.dt accessor
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 348fb4f23cefc..4e8228f3d8631 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -711,8 +711,10 @@ def __add__(self, other):
from pandas.core.index import Index
from pandas.tseries.offsets import DateOffset
if isinstance(other, Index):
+ warnings.warn("using '+' to provide set union with Indexes is deprecated, "
+ "use .union()",FutureWarning)
return self.union(other)
- elif isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)):
+ if isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shift(other)
@@ -723,8 +725,10 @@ def __sub__(self, other):
from pandas.core.index import Index
from pandas.tseries.offsets import DateOffset
if isinstance(other, Index):
- return self.diff(other)
- elif isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)):
+ warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
+ "use .difference()",FutureWarning)
+ return self.difference(other)
+ if isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shift(-other)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index ec1de70e18b4c..0d1876f213cc7 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -435,7 +435,7 @@ def reorder_levels(self, new_levels, ordered=None):
"""
new_levels = self._validate_levels(new_levels)
- if len(new_levels) < len(self._levels) or len(self._levels-new_levels):
+ if len(new_levels) < len(self._levels) or len(self._levels.difference(new_levels)):
raise ValueError('Reordered levels must include all original levels')
values = self.__array__()
self._codes = _get_codes_for_values(values, new_levels)
@@ -887,7 +887,7 @@ def __setitem__(self, key, value):
raise ValueError("cannot set a Categorical with another, without identical levels")
rvalue = value if com.is_list_like(value) else [value]
- to_add = Index(rvalue)-self.levels
+ to_add = Index(rvalue).difference(self.levels)
# no assignments of values not in levels, but it's always ok to set something to np.nan
if len(to_add) and not isnull(to_add).all():
raise ValueError("cannot setitem on a Categorical with a new level,"
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 352ac52281c54..141947da78cbf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3682,7 +3682,7 @@ def append(self, other, ignore_index=False, verify_integrity=False):
'ignore_index=True')
index = None if other.name is None else [other.name]
- combined_columns = self.columns.tolist() + ((self.columns | other.index) - self.columns).tolist()
+ combined_columns = self.columns.tolist() + (self.columns | other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index, columns=combined_columns).convert_objects()
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 2a87563b88562..26ef375934ac9 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -473,7 +473,7 @@ def _set_selection_from_grouper(self):
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
- self._group_selection = (ax-Index(groupers)).tolist()
+ self._group_selection = ax.difference(Index(groupers)).tolist()
def _set_result_index_ordered(self, result):
# set the result index on the passed values object
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 065dcd90b8d76..61fb3bffc55a4 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1128,9 +1128,10 @@ def argsort(self, *args, **kwargs):
def __add__(self, other):
if isinstance(other, Index):
+ warnings.warn("using '+' to provide set union with Indexes is deprecated, "
+ "use '|' or .union()",FutureWarning)
return self.union(other)
- else:
- return Index(np.array(self) + other)
+ return Index(np.array(self) + other)
__iadd__ = __add__
__eq__ = _indexOp('__eq__')
@@ -1141,7 +1142,10 @@ def __add__(self, other):
__ge__ = _indexOp('__ge__')
def __sub__(self, other):
- return self.diff(other)
+ if isinstance(other, Index):
+ warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
+ "use .difference()",FutureWarning)
+ return self.difference(other)
def __and__(self, other):
return self.intersection(other)
@@ -1273,7 +1277,7 @@ def intersection(self, other):
taken.name = None
return taken
- def diff(self, other):
+ def difference(self, other):
"""
Compute sorted set difference of two Index objects
@@ -1289,8 +1293,7 @@ def diff(self, other):
-----
One can do either of these and achieve the same result
- >>> index - index2
- >>> index.diff(index2)
+ >>> index.difference(index2)
"""
if not hasattr(other, '__iter__'):
@@ -1308,6 +1311,8 @@ def diff(self, other):
theDiff = sorted(set(self) - set(other))
return Index(theDiff, name=result_name)
+ diff = deprecate('diff',difference)
+
def sym_diff(self, other, result_name=None):
"""
Compute the sorted symmetric difference of two Index objects.
@@ -1350,7 +1355,7 @@ def sym_diff(self, other, result_name=None):
other = Index(other)
result_name = result_name or self.name
- the_diff = sorted(set((self - other) + (other - self)))
+ the_diff = sorted(set((self.difference(other)).union(other.difference(self))))
return Index(the_diff, name=result_name)
def get_loc(self, key):
@@ -4135,6 +4140,8 @@ def union(self, other):
Returns
-------
Index
+
+ >>> index.union(index2)
"""
self._assert_can_do_setop(other)
@@ -4177,7 +4184,7 @@ def intersection(self, other):
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
- def diff(self, other):
+ def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 3ea85be27ac58..03de19afe0580 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -678,9 +678,9 @@ def _combine_frame(self, other, func, axis=0):
self.minor_axis)
def _combine_panel(self, other, func):
- items = self.items + other.items
- major = self.major_axis + other.major_axis
- minor = self.minor_axis + other.minor_axis
+ items = self.items.union(other.items)
+ major = self.major_axis.union(other.major_axis)
+ minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index 3eebd51190e3d..ec0a313ff5767 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -82,7 +82,7 @@ def _combine_with_constructor(self, other, func):
# combine labels to form new axes
new_axes = []
for a in self._AXIS_ORDERS:
- new_axes.append(getattr(self, a) + getattr(other, a))
+ new_axes.append(getattr(self, a).union(getattr(other, a)))
# reindex: could check that everything's the same size, but forget it
d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, new_axes)])
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 3ba589b8fa35d..458f672530270 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -848,7 +848,7 @@ def lreshape(data, groups, dropna=True, label=None):
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
- id_cols = list(data.columns.diff(all_cols))
+ id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b6aa546177729..2c166c231ae34 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1458,7 +1458,7 @@ def combine(self, other, func, fill_value=nan):
result : Series
"""
if isinstance(other, Series):
- new_index = self.index + other.index
+ new_index = self.index.union(other.index)
new_name = _maybe_match_name(self, other)
new_values = pa.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
@@ -1484,7 +1484,7 @@ def combine_first(self, other):
-------
y : Series
"""
- new_index = self.index + other.index
+ new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
name = _maybe_match_name(self, other)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 965b87d8044ed..5a68cb16f058f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -970,7 +970,7 @@ def append_to_multiple(self, d, value, selector, data_columns=None,
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
- ordd = ordered - Index(remain_values)
+ ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
@@ -3245,7 +3245,7 @@ def get_blk_items(mgr, blocks):
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
- Index(axis_labels) - Index(data_columns),
+ Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
@@ -3362,7 +3362,7 @@ def process_filter(field, filt):
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
- filt = filt + Index(self.levels)
+ filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.ix._getitem_axis(takers,
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index c1419ef2d023e..4f72c0d1c6cbe 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2320,7 +2320,7 @@ def test_remove_startstop(self):
n = store.remove('wp5', start=16, stop=-16)
self.assertTrue(n == 120-32)
result = store.select('wp5')
- expected = wp.reindex(major_axis=wp.major_axis[:16//4]+wp.major_axis[-16//4:])
+ expected = wp.reindex(major_axis=wp.major_axis[:16//4].union(wp.major_axis[-16//4:]))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
@@ -2339,7 +2339,7 @@ def test_remove_startstop(self):
n = store.remove('wp7', where=[crit], stop=80)
self.assertTrue(n == 28)
result = store.select('wp7')
- expected = wp.reindex(major_axis=wp.major_axis-wp.major_axis[np.arange(0,20,3)])
+ expected = wp.reindex(major_axis=wp.major_axis.difference(wp.major_axis[np.arange(0,20,3)]))
assert_panel_equal(result, expected)
def test_remove_crit(self):
@@ -2357,7 +2357,7 @@ def test_remove_crit(self):
self.assertTrue(n == 36)
result = store.select('wp3')
- expected = wp.reindex(major_axis=wp.major_axis - date4)
+ expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
@@ -2385,7 +2385,7 @@ def test_remove_crit(self):
crit1 = Term('major_axis=date1')
store.remove('wp2', where=[crit1])
result = store.select('wp2')
- expected = wp.reindex(major_axis=wp.major_axis - date1)
+ expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
@@ -2393,7 +2393,7 @@ def test_remove_crit(self):
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
- major_axis=wp.major_axis - date1 - Index([date2]))
+ major_axis=wp.major_axis.difference(date1).difference(Index([date2])))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
@@ -2401,7 +2401,7 @@ def test_remove_crit(self):
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(
- major_axis=wp.major_axis - date1 - Index([date2]) - Index(date3))
+ major_axis=wp.major_axis.difference(date1).difference(Index([date2])).difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index 20bbc58cc908f..62e0e3e985775 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -427,9 +427,9 @@ def _new_like(self, new_frames):
default_kind=self.default_kind)
def _combinePanel(self, other, func):
- items = self.items + other.items
- major = self.major_axis + other.major_axis
- minor = self.minor_axis + other.minor_axis
+ items = self.items.union(other.items)
+ major = self.major_axis.union(other.major_axis)
+ minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 8b0605dd391be..dd7bc41c8d62c 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -790,13 +790,8 @@ def test_add_iadd(self):
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
- result_add = rng + other
result_union = rng.union(other)
-
- tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
- rng += other
- tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
@@ -834,13 +829,8 @@ def test_sub_isub(self):
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
- result_add = rng - other
- result_union = rng.diff(other)
-
- tm.assert_index_equal(result_add, expected)
+ result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
- rng -= other
- tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
@@ -1063,14 +1053,8 @@ def test_add_iadd(self):
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7)]:
- result_add = rng + other
result_union = rng.union(other)
-
- tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
- # GH 6527
- rng += other
- tm.assert_index_equal(rng, expected)
# offset
# DateOffset
@@ -1176,13 +1160,8 @@ def test_sub_isub(self):
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
- result_add = rng - other
- result_union = rng.diff(other)
-
- tm.assert_index_equal(result_add, expected)
+ result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
- rng -= other
- tm.assert_index_equal(rng, expected)
# offset
# DateOffset
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 92d7da511c99a..b5f8dec857f2f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -8974,8 +8974,8 @@ def test_align(self):
self.assertTrue(bf.columns.equals(other.columns))
# test fill value
join_idx = self.frame.index.join(other.index)
- diff_a = self.frame.index.diff(join_idx)
- diff_b = other.index.diff(join_idx)
+ diff_a = self.frame.index.difference(join_idx)
+ diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
@@ -8993,8 +8993,8 @@ def test_align(self):
# test fill value
join_idx = self.frame.index.join(other.index)
- diff_a = self.frame.index.diff(join_idx)
- diff_b = other.index.diff(join_idx)
+ diff_a = self.frame.index.difference(join_idx)
+ diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 2c5f100843445..0734da1ab09aa 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -124,12 +124,12 @@ def test_get_numeric_data(self):
# _get_numeric_data is includes _get_bool_data, so can't test for non-inclusion
def test_get_default(self):
-
+
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
-
+
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i,d in zip(index, data):
@@ -501,7 +501,7 @@ def test_interp_regression(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
- new_index = ser.index + Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
+ new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
@@ -1153,7 +1153,7 @@ def test_tz_convert_and_localize(self):
# MultiIndex
# GH7846
- df2 = DataFrame(np.ones(5),
+ df2 = DataFrame(np.ones(5),
MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 8f33da4521c8e..a5301dce54dee 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -2261,10 +2261,10 @@ def test_df_legend_labels(self):
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
- self._check_legend_labels(ax, labels=df.columns + df3.columns)
+ self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
- expected = list(df.columns + df3.columns) + list(reversed(df4.columns))
+ expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 60105719179ad..97149e8f224d3 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -557,8 +557,13 @@ def test_union(self):
self.assertIsNone(union.name)
def test_add(self):
- firstCat = self.strIndex + self.dateIndex
- secondCat = self.strIndex + self.strIndex
+
+ # - API change GH 8226
+ with tm.assert_produces_warning():
+ self.strIndex + self.dateIndex
+
+ firstCat = self.strIndex.union(self.dateIndex)
+ secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
@@ -611,29 +616,30 @@ def test_iadd_string(self):
index += '_x'
self.assertIn('a_x', index)
- def test_diff(self):
+ def test_difference(self):
+
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
- result = first - second
+ result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
- result = first - second
+ result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
- result = first.diff([])
+ result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everythin
- result = first.diff(first)
+ result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
@@ -2580,7 +2586,6 @@ def test_format_sparse_display(self):
self.assertEqual(result[3], '1 0 0 0')
def test_format_sparse_config(self):
- import warnings
warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
@@ -2775,9 +2780,15 @@ def test_intersection(self):
# result = self.index & tuples
# self.assertTrue(result.equals(tuples))
- def test_diff(self):
+ def test_difference(self):
+
first = self.index
- result = first - self.index[-3:]
+ result = first.difference(self.index[-3:])
+
+ # - API change GH 8226
+ with tm.assert_produces_warning():
+ first - self.index[-3:]
+
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
@@ -2787,19 +2798,19 @@ def test_diff(self):
self.assertEqual(result.names, self.index.names)
# empty difference: reflexive
- result = self.index - self.index
+ result = self.index.difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: superset
- result = self.index[-3:] - self.index
+ result = self.index[-3:].difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: degenerate
- result = self.index[:0] - self.index
+ result = self.index[:0].difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
@@ -2807,31 +2818,31 @@ def test_diff(self):
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
- result = first - chunklet
+ result = first.difference(chunklet)
self.assertEqual(result.names, (None, None))
# empty, but non-equal
- result = self.index - self.index.sortlevel(1)[0]
+ result = self.index.difference(self.index.sortlevel(1)[0])
self.assertEqual(len(result), 0)
# raise Exception called with non-MultiIndex
- result = first.diff(first._tuple_index)
+ result = first.difference(first._tuple_index)
self.assertTrue(result.equals(first[:0]))
# name from empty array
- result = first.diff([])
+ result = first.difference([])
self.assertTrue(first.equals(result))
self.assertEqual(first.names, result.names)
# name from non-empty array
- result = first.diff([('foo', 'one')])
+ result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'), ('qux', 'one'),
('qux', 'two')])
expected.names = first.names
self.assertEqual(first.names, result.names)
assertRaisesRegexp(TypeError, "other must be a MultiIndex or a list"
- " of tuples", first.diff, [1, 2, 3, 4, 5])
+ " of tuples", first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
assertRaisesRegexp(TypeError, 'Cannot infer number of levels from'
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 64a83ce3c3137..02a8f79e5a8c1 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5199,8 +5199,8 @@ def _check_align(a, b, how='left', fill=None):
join_index = a.index.join(b.index, how=how)
if fill is not None:
- diff_a = aa.index.diff(join_index)
- diff_b = ab.index.diff(join_index)
+ diff_a = aa.index.difference(join_index)
+ diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index bad0f9523aad2..69b9436a5769b 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -387,7 +387,7 @@ def test_join_multiindex(self):
df2 = df2.sortlevel(0)
joined = df1.join(df2, how='outer')
- ex_index = index1._tuple_index + index2._tuple_index
+ ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
@@ -397,7 +397,7 @@ def test_join_multiindex(self):
df2 = df2.sortlevel(1)
joined = df1.join(df2, how='outer').sortlevel(0)
- ex_index = index1._tuple_index + index2._tuple_index
+ ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 6b34ae0eb9384..4a60cdbedae4d 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -59,7 +59,7 @@ def test_frame_inferred(self):
_check_plot_works(df.plot)
# axes freq
- idx = idx[0:40] + idx[45:99]
+ idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 63db28ca53cf1..828c2a554b02d 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2898,7 +2898,7 @@ def test_datetimeindex_diff(self):
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
- self.assertEqual(len(dti1.diff(dti2)), 2)
+ self.assertEqual(len(dti1.difference(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
| closes #8226
- ToDo
- [x] currently a raise `TypeError` (need a better message), possibly could deprecate and just warn
- [x] API example
Combining indexes via `+` now requires `.union()`
Diffing indexes via `-` now requires `.diff()`
These make the Indexes use setops (`&`,`|`,`^`) ONLY
numeric ops will work on appropriate Index ONLY (see #8184) (e.g. `DatetimeIndex` +/- `Timedelta` or `TimedeltaIndex` +/- `TImedeltaIndex`)
Also `Int64/Float64Index` would gain ability to `+/-` as well
This will promote consistency that `+` means add (and not union), and `-` means subtract (and not difference), which IIRC has come up in a few issues
| https://api.github.com/repos/pandas-dev/pandas/pulls/8227 | 2014-09-09T22:31:37Z | 2014-09-11T15:44:57Z | 2014-09-11T15:44:57Z | 2014-10-06T14:28:34Z |
DOC: fix formatting error in io.rst | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d60fc234650e0..e087542e6a7a5 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2541,17 +2541,17 @@ The right-hand side of the sub-expression (after a comparison operator) can be:
string = "HolyMoly'"
store.select('df', 'index == %s' % string)
- The latter will **not** work and will raise a ``SyntaxError``.Note that
- there's a single quote followed by a double quote in the ``string``
- variable.
+ The latter will **not** work and will raise a ``SyntaxError``.Note that
+ there's a single quote followed by a double quote in the ``string``
+ variable.
- If you *must* interpolate, use the ``'%r'`` format specifier
+ If you *must* interpolate, use the ``'%r'`` format specifier
- .. code-block:: python
+ .. code-block:: python
- store.select('df', 'index == %r' % string)
+ store.select('df', 'index == %r' % string)
- which will quote ``string``.
+ which will quote ``string``.
Here are some examples:
| Lines had an extra space causing the code block above to keep going for the rest of the note.
Before:

| https://api.github.com/repos/pandas-dev/pandas/pulls/8223 | 2014-09-09T14:53:53Z | 2014-09-09T14:57:12Z | 2014-09-09T14:57:12Z | 2014-09-09T14:57:16Z |
DOC: numexpr is now on github | diff --git a/README.md b/README.md
index 778984aa4cf52..93bfe7482d31e 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,7 @@ pip install pandas
- Needed for time zone support with ``pandas.date_range``
### Highly Recommended Dependencies
-- [numexpr](http://code.google.com/p/numexpr/)
+- [numexpr](https://github.com/pydata/numexpr)
- Needed to accelerate some expression evaluation operations
- Required by PyTables
- [bottleneck](http://berkeleyanalytics.com/bottleneck)
| numexpr is now on github
| https://api.github.com/repos/pandas-dev/pandas/pulls/8221 | 2014-09-09T14:03:43Z | 2014-09-09T14:12:05Z | 2014-09-09T14:12:05Z | 2014-09-09T14:14:13Z |
API: Series.to_csv(path=None) should return string to match DataFrame.to_csv() | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index acf7fdfd6a10a..7b4a43c8ccf0e 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -309,6 +309,9 @@ API changes
df
df.dtypes
+- ``Series.to_csv()`` now returns a string when ``path=None``, matching the behaviour of
+ ``DataFrame.to_csv()`` (:issue:`8215`).
+
.. _whatsnew_0150.dt:
.dt accessor
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a0bbb2c713e56..b6aa546177729 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2200,7 +2200,8 @@ def to_csv(self, path, index=True, sep=",", na_rep='',
Parameters
----------
- path : string file path or file handle / StringIO
+ path : string file path or file handle / StringIO. If None is provided
+ the result is returned as a string.
na_rep : string, default ''
Missing data representation
float_format : string, default None
@@ -2224,10 +2225,13 @@ def to_csv(self, path, index=True, sep=",", na_rep='',
"""
from pandas.core.frame import DataFrame
df = DataFrame(self)
- df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
+ # result is only a string if no path provided, otherwise None
+ result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
float_format=float_format, header=header,
index_label=index_label, mode=mode, nanRep=nanRep,
encoding=encoding, date_format=date_format)
+ if path is None:
+ return result
def dropna(self, axis=0, inplace=False, **kwargs):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index f9e7cf3545374..92d7da511c99a 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6428,6 +6428,14 @@ def test_to_csv_from_csv_categorical(self):
df2.to_csv(exp)
self.assertEqual(res.getvalue(), exp.getvalue())
+ def test_to_csv_path_is_none(self):
+ # GH 8215
+ # Make sure we return string for consistency with
+ # Series.to_csv()
+ csv_str = self.frame.to_csv(path=None)
+ self.assertIsInstance(csv_str, str)
+ recons = pd.read_csv(StringIO(csv_str), index_col=0)
+ assert_frame_equal(self.frame, recons)
def test_info(self):
io = StringIO()
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 022a8b543ce32..da4dc904c71e3 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4371,6 +4371,14 @@ def test_to_csv_list_entries(self):
buf = StringIO()
split.to_csv(buf)
+ def test_to_csv_path_is_none(self):
+ # GH 8215
+ # Series.to_csv() was returning None, inconsistent with
+ # DataFrame.to_csv() which returned string
+ s = Series([1, 2, 3])
+ csv_str = s.to_csv(path=None)
+ self.assertIsInstance(csv_str, str)
+
def test_clip(self):
val = self.ts.median()
| Addresses #8215. Currently, `Series.to_csv(path=None)` returns `None`. The `DataFrame` method returns a string, and already had this behaviour mentioned in the docstring, so it should be OK to make the `Series` method match this.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8220 | 2014-09-09T09:08:06Z | 2014-09-09T11:29:21Z | 2014-09-09T11:29:21Z | 2014-09-09T11:29:36Z |
ENH to_latex mi index will use & sep for levels | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 339cd9344f089..3f03db6dcf93f 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -513,12 +513,23 @@ def get_col_type(dtype):
else:
strcols = self._to_str_columns()
+ if self.index and isinstance(self.frame.index, MultiIndex):
+ fmt = self._get_formatter('__index__')
+ clevels = self.frame.columns.nlevels
+ strcols.pop(0)
+ name = any(self.frame.columns.names)
+ for i, lev in enumerate(self.frame.index.levels):
+ lev2 = lev.format(name=name)
+ width = len(lev2[0])
+ lev3 = [' ' * width] * clevels + lev2
+ strcols.insert(i, lev3)
+
if column_format is None:
dtypes = self.frame.dtypes.values
+ column_format = ''.join(map(get_col_type, dtypes))
if self.index:
- column_format = 'l%s' % ''.join(map(get_col_type, dtypes))
- else:
- column_format = '%s' % ''.join(map(get_col_type, dtypes))
+ index_format = 'l' * self.frame.index.nlevels
+ column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
@@ -645,8 +656,8 @@ def has_index_names(self):
def has_column_names(self):
return _has_names(self.frame.columns)
- def _get_formatted_index(self,frame):
- # Note: this is only used by to_string(), not by to_html().
+ def _get_formatted_index(self, frame):
+ # Note: this is only used by to_string() and to_latex(), not by to_html().
index = frame.index
columns = frame.columns
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index aa19865342fab..64b0fd6ebf199 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2098,11 +2098,11 @@ def test_to_latex_multiindex(self):
self.assertEqual(result, expected)
result = df.T.to_latex()
- expected = r"""\begin{tabular}{ll}
+ expected = r"""\begin{tabular}{lll}
\toprule
-{} & 0 \\
+ & & 0 \\
\midrule
-x y & a \\
+x & y & a \\
\bottomrule
\end{tabular}
"""
| follow up of #7982
_One question I had was the use of `{ }` (is that a convention? I removed it and it seems to compile ok.)_
Note: should test MI cols and MI index. Ah cripes... the names of the column Index don't print, not did they before...
| https://api.github.com/repos/pandas-dev/pandas/pulls/8219 | 2014-09-09T05:11:14Z | 2014-09-14T14:13:51Z | 2014-09-14T14:13:51Z | 2014-09-20T22:12:29Z |
BUG: Bug in read_csv where squeeze=True would return a view (GH8217) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index f73c080b6e71d..aaf9830b22f56 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -480,7 +480,6 @@ Enhancements
-- Bug in ``DataFrame.groupby`` where ``Grouper`` does not recognize level when frequency is specified (:issue:`7885`)
@@ -538,7 +537,9 @@ There are no experimental changes in 0.15.0
Bug Fixes
~~~~~~~~~
+- Bug in ``read_csv`` where ``squeeze=True`` would return a view (:issue:`8217`)
- Bug in checking of table name in ``read_sql`` in certain cases (:issue:`7826`).
+- Bug in ``DataFrame.groupby`` where ``Grouper`` does not recognize level when frequency is specified (:issue:`7885`)
- Bug in multiindexes dtypes getting mixed up when DataFrame is saved to SQL table (:issue:`8021`)
- Bug in Series 0-division with a float and integer operand dtypes (:issue:`7785`)
- Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6d2afbad36e35..3a5b3af7b4c74 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -703,7 +703,7 @@ def read(self, nrows=None):
df = DataFrame(col_dict, columns=columns, index=index)
if self.squeeze and len(df.columns) == 1:
- return df[df.columns[0]]
+ return df[df.columns[0]].copy()
return df
def _create_index(self, ret):
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 14e69179f9ff4..f2b9a9447e8fb 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -263,6 +263,15 @@ def test_squeeze(self):
tm.assert_isinstance(result, Series)
tm.assert_series_equal(result, expected)
+ def test_squeeze_no_view(self):
+
+ # GH 8217
+ # series should not be a view
+
+ data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
+ result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
+ self.assertFalse(result._is_view)
+
def test_inf_parsing(self):
data = """\
,A
| closes #8217
| https://api.github.com/repos/pandas-dev/pandas/pulls/8218 | 2014-09-08T23:08:34Z | 2014-09-08T23:40:44Z | 2014-09-08T23:40:44Z | 2014-09-08T23:40:44Z |
ENH: sql support for writing NaN + datetime64 values (GH2754, GH7103) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index f0c3c0e6bc508..49c431d8071e8 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -494,6 +494,9 @@ Enhancements
df.to_sql('table', engine, schema='other_schema')
pd.read_sql_table('table', engine, schema='other_schema')
+- Added support for writing ``NaN`` values with ``to_sql`` (:issue:`2754`).
+- Added support for writing datetime64 columns with ``to_sql`` for all database flavors (:issue:`7103`).
+
- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`)
- Added ``layout`` keyword to ``DataFrame.plot`` (:issue:`6667`)
@@ -573,7 +576,7 @@ Performance
- Performance improvements in ``StataWriter`` when writing large files (:issue:`8079`)
- Performance and memory usage improvements in multi-key ``groupby`` (:issue:`8128`)
- Performance improvements in groupby ``.agg`` and ``.apply`` where builtins max/min were not mapped to numpy/cythonized versions (:issue:`7722`)
-
+- Performance improvement in writing to sql (``to_sql``) of up to 50% (:issue:`8208`).
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index c960a73bb0f88..462179b442ac0 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -15,6 +15,7 @@
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
+from pandas.core.common import notnull, isnull
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
@@ -598,12 +599,6 @@ def create(self):
def insert_statement(self):
return self.table.insert()
- def maybe_asscalar(self, i):
- try:
- return np.asscalar(i)
- except AttributeError:
- return i
-
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
@@ -615,16 +610,36 @@ def insert_data(self):
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
+
+ column_names = list(map(str, temp.columns))
+ ncols = len(column_names)
+ data_list = [None] * ncols
+ blocks = temp._data.blocks
+
+ for i in range(len(blocks)):
+ b = blocks[i]
+ if b.is_datetime:
+ # convert to microsecond resolution so this yields datetime.datetime
+ d = b.values.astype('M8[us]').astype(object)
+ else:
+ d = np.array(b.values, dtype=object)
+
+ # replace NaN with None
+ if b._can_hold_na:
+ mask = isnull(d)
+ d[mask] = None
- return temp
+ for col_loc, col in zip(b.mgr_locs, d):
+ data_list[col_loc] = col
+
+ return column_names, data_list
def insert(self, chunksize=None):
ins = self.insert_statement()
- temp = self.insert_data()
- keys = list(map(str, temp.columns))
+ keys, data_list = self.insert_data()
- nrows = len(temp)
+ nrows = len(self.frame)
if chunksize is None:
chunksize = nrows
chunks = int(nrows / chunksize) + 1
@@ -636,12 +651,11 @@ def insert(self, chunksize=None):
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
- data_list = []
- for t in temp.iloc[start_i:end_i].itertuples():
- data = dict((k, self.maybe_asscalar(v))
- for k, v in zip(keys, t[1:]))
- data_list.append(data)
- con.execute(ins, data_list)
+
+ chunk_list = [arr[start_i:end_i] for arr in data_list]
+ insert_list = [dict((k, v) for k, v in zip(keys, row))
+ for row in zip(*chunk_list)]
+ con.execute(ins, insert_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
@@ -758,12 +772,12 @@ def _harmonize_columns(self, parse_dates=None):
elif col_type is float:
# floats support NA, can always convert!
- self.frame[col_name].astype(col_type, copy=False)
+ self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
- if col_type is int or col_type is bool:
- self.frame[col_name].astype(col_type, copy=False)
+ if col_type is np.dtype('int64') or col_type is bool:
+ self.frame[col_name] = df_col.astype(col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
@@ -813,7 +827,7 @@ def _numpy_type(self, sqltype):
return float
if isinstance(sqltype, Integer):
# TODO: Refine integer size.
- return int
+ return np.dtype('int64')
if isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
@@ -1008,9 +1022,9 @@ def insert_statement(self):
def insert(self, chunksize=None):
ins = self.insert_statement()
- temp = self.insert_data()
+ keys, data_list = self.insert_data()
- nrows = len(temp)
+ nrows = len(self.frame)
if chunksize is None:
chunksize = nrows
chunks = int(nrows / chunksize) + 1
@@ -1021,13 +1035,11 @@ def insert(self, chunksize=None):
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
- data_list = []
- for t in temp.iloc[start_i:end_i].itertuples():
- data = tuple((self.maybe_asscalar(v) for v in t[1:]))
- data_list.append(data)
-
+ chunk_list = [arr[start_i:end_i] for arr in data_list]
+ insert_list = [tuple((v for v in row))
+ for row in zip(*chunk_list)]
cur = self.pd_sql.con.cursor()
- cur.executemany(ins, data_list)
+ cur.executemany(ins, insert_list)
cur.close()
def _create_table_setup(self):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 3ad9669abb883..53ddd5c42a1d7 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -952,9 +952,6 @@ def test_date_parsing(self):
"IntDateCol loaded with incorrect type")
def test_datetime(self):
- if self.driver == 'pymysql':
- raise nose.SkipTest('writing datetime not working with pymysql')
-
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
@@ -975,17 +972,6 @@ def test_datetime(self):
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
- # status:
- # - postgresql: gives error on inserting "0001-255-255T00:00:00"
- # - sqlite3: works, but reading it with query returns '-001--1--1 -1:-1:-1.-00001'
-
- if self.driver == 'pymysql':
- raise nose.SkipTest('writing datetime not working with pymysql')
- if self.driver == 'psycopg2':
- raise nose.SkipTest('writing datetime NaT not working with psycopg2')
- if self.flavor == 'sqlite':
- raise nose.SkipTest('reading datetime NaT not working with sqlite')
-
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
@@ -1032,9 +1018,6 @@ def test_mixed_dtype_insert(self):
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
- if self.driver == 'pymysql':
- raise nose.SkipTest('writing NaNs not working with pymysql')
-
# NaNs in numeric float column
df = DataFrame({'A':[0, 1, 2], 'B':[0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
@@ -1048,37 +1031,27 @@ def test_nan_numeric(self):
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
- if self.driver == 'pymysql':
- raise nose.SkipTest('writing NaNs not working with pymysql')
-
# full NaN column (numeric float column)
df = DataFrame({'A':[0, 1, 2], 'B':[np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
- if self.flavor == 'sqlite':
- df['B'] = df['B'].astype('object')
- df['B'] = None
-
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
- # with read_sql
+ # with read_sql -> not type info from table -> stays None
+ df['B'] = df['B'].astype('object')
+ df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
- if self.driver == 'pymysql':
- raise nose.SkipTest('writing NaNs not working with pymysql')
-
# NaNs in string column
df = DataFrame({'A':[0, 1, 2], 'B':['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
- if self.flavor == 'sqlite':
- df.loc[2, 'B'] = None
- elif self.flavor == 'postgresql':
- df = df.fillna('NaN')
+ # NaNs are coming back as None
+ df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
| Closes #2754
Closes #7103, #7936 (supercedes PR #8205 for this)
Closes #7567
Database drivers expect `None` to translate this to `NULL`.
Therefore, dataframe is cast to object dtype to convert `NaN` to `None`.
To check: performance impact. In principle should also not needed for integer colums (can't have NaNs)
Kind of API change: it did already work for postgresql (psycopg2), but it was translated to postgresql's `NaN` and now to `NULL` which is something different (but I think this is actually an improvement)
| https://api.github.com/repos/pandas-dev/pandas/pulls/8208 | 2014-09-07T21:16:07Z | 2014-09-13T21:48:56Z | 2014-09-13T21:48:56Z | 2014-09-21T19:05:04Z |
DOC: reorg indexing docs | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 6b1bfdf7b241d..3a3b3d5e36977 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -166,7 +166,7 @@ Selection
recommend the optimized pandas data access methods, ``.at``, ``.iat``,
``.loc``, ``.iloc`` and ``.ix``.
-See the :ref:`Indexing section <indexing>` and below.
+See the indexing documentation :ref:`Indexing and Selecing Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>`
Getting
~~~~~~~
@@ -529,7 +529,7 @@ the function.
Reshaping
---------
-See the sections on :ref:`Hierarchical Indexing <indexing.hierarchical>` and
+See the sections on :ref:`Hierarchical Indexing <advanced.hierarchical>` and
:ref:`Reshaping <reshaping.stacking>`.
Stack
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
new file mode 100644
index 0000000000000..1749409c863df
--- /dev/null
+++ b/doc/source/advanced.rst
@@ -0,0 +1,709 @@
+.. _advanced:
+
+.. currentmodule:: pandas
+
+.. ipython:: python
+ :suppress:
+
+ import numpy as np
+ import random
+ np.random.seed(123456)
+ from pandas import *
+ options.display.max_rows=15
+ import pandas as pd
+ randn = np.random.randn
+ randint = np.random.randint
+ np.set_printoptions(precision=4, suppress=True)
+ from pandas.compat import range, zip
+
+******************************
+MultiIndex / Advanced Indexing
+******************************
+
+This section covers indexing with a ``MultiIndex`` and more advanced indexing features.
+
+See the :ref:`Indexing and Selecting Data <indexing>` for general indexing documentation.
+
+.. warning::
+
+ Whether a copy or a reference is returned for a setting operation, may
+ depend on the context. This is sometimes called ``chained assignment`` and
+ should be avoided. See :ref:`Returning a View versus Copy
+ <indexing.view_versus_copy>`
+
+.. warning::
+
+ In 0.15.0 ``Index`` has internally been refactored to no longer sub-class ``ndarray``
+ but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be
+ a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`)
+
+See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
+
+.. _advanced.hierarchical:
+
+Hierarchical indexing (MultiIndex)
+----------------------------------
+
+Hierarchical / Multi-level indexing is very exciting as it opens the door to some
+quite sophisticated data analysis and manipulation, especially for working with
+higher dimensional data. In essence, it enables you to store and manipulate
+data with an arbitrary number of dimensions in lower dimensional data
+structures like Series (1d) and DataFrame (2d).
+
+In this section, we will show what exactly we mean by "hierarchical" indexing
+and how it integrates with the all of the pandas indexing functionality
+described above and in prior sections. Later, when discussing :ref:`group by
+<groupby>` and :ref:`pivoting and reshaping data <reshaping>`, we'll show
+non-trivial applications to illustrate how it aids in structuring data for
+analysis.
+
+See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies
+
+Creating a MultiIndex (hierarchical index) object
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``MultiIndex`` object is the hierarchical analogue of the standard
+``Index`` object which typically stores the axis labels in pandas objects. You
+can think of ``MultiIndex`` an array of tuples where each tuple is unique. A
+``MultiIndex`` can be created from a list of arrays (using
+``MultiIndex.from_arrays``), an array of tuples (using
+``MultiIndex.from_tuples``), or a crossed set of iterables (using
+``MultiIndex.from_product``). The ``Index`` constructor will attempt to return
+a ``MultiIndex`` when it is passed a list of tuples. The following examples
+demo different ways to initialize MultiIndexes.
+
+
+.. ipython:: python
+
+ arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
+ ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
+ tuples = list(zip(*arrays))
+ tuples
+
+ index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
+ index
+
+ s = Series(randn(8), index=index)
+ s
+
+When you want every pairing of the elements in two iterables, it can be easier
+to use the ``MultiIndex.from_product`` function:
+
+.. ipython:: python
+
+ iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']]
+ MultiIndex.from_product(iterables, names=['first', 'second'])
+
+As a convenience, you can pass a list of arrays directly into Series or
+DataFrame to construct a MultiIndex automatically:
+
+.. ipython:: python
+
+ arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
+ np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
+ s = Series(randn(8), index=arrays)
+ s
+ df = DataFrame(randn(8, 4), index=arrays)
+ df
+
+All of the ``MultiIndex`` constructors accept a ``names`` argument which stores
+string names for the levels themselves. If no names are provided, ``None`` will
+be assigned:
+
+.. ipython:: python
+
+ df.index.names
+
+This index can back any axis of a pandas object, and the number of **levels**
+of the index is up to you:
+
+.. ipython:: python
+
+ df = DataFrame(randn(3, 8), index=['A', 'B', 'C'], columns=index)
+ df
+ DataFrame(randn(6, 6), index=index[:6], columns=index[:6])
+
+We've "sparsified" the higher levels of the indexes to make the console output a
+bit easier on the eyes.
+
+It's worth keeping in mind that there's nothing preventing you from using
+tuples as atomic labels on an axis:
+
+.. ipython:: python
+
+ Series(randn(8), index=tuples)
+
+The reason that the ``MultiIndex`` matters is that it can allow you to do
+grouping, selection, and reshaping operations as we will describe below and in
+subsequent areas of the documentation. As you will see in later sections, you
+can find yourself working with hierarchically-indexed data without creating a
+``MultiIndex`` explicitly yourself. However, when loading data from a file, you
+may wish to generate your own ``MultiIndex`` when preparing the data set.
+
+Note that how the index is displayed by be controlled using the
+``multi_sparse`` option in ``pandas.set_printoptions``:
+
+.. ipython:: python
+
+ pd.set_option('display.multi_sparse', False)
+ df
+ pd.set_option('display.multi_sparse', True)
+
+.. _advanced.get_level_values:
+
+Reconstructing the level labels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The method ``get_level_values`` will return a vector of the labels for each
+location at a particular level:
+
+.. ipython:: python
+
+ index.get_level_values(0)
+ index.get_level_values('second')
+
+
+Basic indexing on axis with MultiIndex
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One of the important features of hierarchical indexing is that you can select
+data by a "partial" label identifying a subgroup in the data. **Partial**
+selection "drops" levels of the hierarchical index in the result in a
+completely analogous way to selecting a column in a regular DataFrame:
+
+.. ipython:: python
+
+ df['bar']
+ df['bar', 'one']
+ df['bar']['one']
+ s['qux']
+
+See :ref:`Cross-section with hierarchical index <advanced.xs>` for how to select
+on a deeper level.
+
+
+Data alignment and using ``reindex``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Operations between differently-indexed objects having ``MultiIndex`` on the
+axes will work as you expect; data alignment will work the same as an Index of
+tuples:
+
+.. ipython:: python
+
+ s + s[:-2]
+ s + s[::2]
+
+``reindex`` can be called with another ``MultiIndex`` or even a list or array
+of tuples:
+
+.. ipython:: python
+
+ s.reindex(index[:3])
+ s.reindex([('foo', 'two'), ('bar', 'one'), ('qux', 'one'), ('baz', 'one')])
+
+.. _advanced.advanced_hierarchical:
+
+Advanced indexing with hierarchical index
+-----------------------------------------
+
+Syntactically integrating ``MultiIndex`` in advanced indexing with ``.loc/.ix`` is a
+bit challenging, but we've made every effort to do so. for example the
+following works as you would expect:
+
+.. ipython:: python
+
+ df = df.T
+ df
+ df.loc['bar']
+ df.loc['bar', 'two']
+
+"Partial" slicing also works quite nicely.
+
+.. ipython:: python
+
+ df.loc['baz':'foo']
+
+You can slice with a 'range' of values, by providing a slice of tuples.
+
+.. ipython:: python
+
+ df.loc[('baz', 'two'):('qux', 'one')]
+ df.loc[('baz', 'two'):'foo']
+
+Passing a list of labels or tuples works similar to reindexing:
+
+.. ipython:: python
+
+ df.ix[[('bar', 'two'), ('qux', 'one')]]
+
+.. _advanced.mi_slicers:
+
+Using slicers
+~~~~~~~~~~~~~
+
+.. versionadded:: 0.14.0
+
+In 0.14.0 we added a new way to slice multi-indexed objects.
+You can slice a multi-index by providing multiple indexers.
+
+You can provide any of the selectors as if you are indexing by label, see :ref:`Selection by Label <indexing.label>`,
+including slices, lists of labels, labels, and boolean indexers.
+
+You can use ``slice(None)`` to select all the contents of *that* level. You do not need to specify all the
+*deeper* levels, they will be implied as ``slice(None)``.
+
+As usual, **both sides** of the slicers are included as this is label indexing.
+
+.. warning::
+
+ You should specify all axes in the ``.loc`` specifier, meaning the indexer for the **index** and
+ for the **columns**. Their are some ambiguous cases where the passed indexer could be mis-interpreted
+ as indexing *both* axes, rather than into say the MuliIndex for the rows.
+
+ You should do this:
+
+ .. code-block:: python
+
+ df.loc[(slice('A1','A3'),.....),:]
+
+ rather than this:
+
+ .. code-block:: python
+
+ df.loc[(slice('A1','A3'),.....)]
+
+.. warning::
+
+ You will need to make sure that the selection axes are fully lexsorted!
+
+.. ipython:: python
+
+ def mklbl(prefix,n):
+ return ["%s%s" % (prefix,i) for i in range(n)]
+
+ miindex = MultiIndex.from_product([mklbl('A',4),
+ mklbl('B',2),
+ mklbl('C',4),
+ mklbl('D',2)])
+ micolumns = MultiIndex.from_tuples([('a','foo'),('a','bar'),
+ ('b','foo'),('b','bah')],
+ names=['lvl0', 'lvl1'])
+ dfmi = DataFrame(np.arange(len(miindex)*len(micolumns)).reshape((len(miindex),len(micolumns))),
+ index=miindex,
+ columns=micolumns).sortlevel().sortlevel(axis=1)
+ dfmi
+
+Basic multi-index slicing using slices, lists, and labels.
+
+.. ipython:: python
+
+ dfmi.loc[(slice('A1','A3'),slice(None), ['C1','C3']),:]
+
+You can use a ``pd.IndexSlice`` to have a more natural syntax using ``:`` rather than using ``slice(None)``
+
+.. ipython:: python
+
+ idx = pd.IndexSlice
+ dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
+
+It is possible to perform quite complicated selections using this method on multiple
+axes at the same time.
+
+.. ipython:: python
+
+ dfmi.loc['A1',(slice(None),'foo')]
+ dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
+
+Using a boolean indexer you can provide selection related to the *values*.
+
+.. ipython:: python
+
+ mask = dfmi[('a','foo')]>200
+ dfmi.loc[idx[mask,:,['C1','C3']],idx[:,'foo']]
+
+You can also specify the ``axis`` argument to ``.loc`` to interpret the passed
+slicers on a single axis.
+
+.. ipython:: python
+
+ dfmi.loc(axis=0)[:,:,['C1','C3']]
+
+Furthermore you can *set* the values using these methods
+
+.. ipython:: python
+
+ df2 = dfmi.copy()
+ df2.loc(axis=0)[:,:,['C1','C3']] = -10
+ df2
+
+You can use a right-hand-side of an alignable object as well.
+
+.. ipython:: python
+
+ df2 = dfmi.copy()
+ df2.loc[idx[:,:,['C1','C3']],:] = df2*1000
+ df2
+
+.. _advanced.xs:
+
+Cross-section
+~~~~~~~~~~~~~
+
+The ``xs`` method of ``DataFrame`` additionally takes a level argument to make
+selecting data at a particular level of a MultiIndex easier.
+
+.. ipython:: python
+
+ df
+ df.xs('one', level='second')
+
+.. ipython:: python
+
+ # using the slicers (new in 0.14.0)
+ df.loc[(slice(None),'one'),:]
+
+You can also select on the columns with :meth:`~pandas.MultiIndex.xs`, by
+providing the axis argument
+
+.. ipython:: python
+
+ df = df.T
+ df.xs('one', level='second', axis=1)
+
+.. ipython:: python
+
+ # using the slicers (new in 0.14.0)
+ df.loc[:,(slice(None),'one')]
+
+:meth:`~pandas.MultiIndex.xs` also allows selection with multiple keys
+
+.. ipython:: python
+
+ df.xs(('one', 'bar'), level=('second', 'first'), axis=1)
+
+.. ipython:: python
+
+ # using the slicers (new in 0.14.0)
+ df.loc[:,('bar','one')]
+
+.. versionadded:: 0.13.0
+
+You can pass ``drop_level=False`` to :meth:`~pandas.MultiIndex.xs` to retain
+the level that was selected
+
+.. ipython:: python
+
+ df.xs('one', level='second', axis=1, drop_level=False)
+
+versus the result with ``drop_level=True`` (the default value)
+
+.. ipython:: python
+
+ df.xs('one', level='second', axis=1, drop_level=True)
+
+.. ipython:: python
+ :suppress:
+
+ df = df.T
+
+.. _advanced.advanced_reindex:
+
+Advanced reindexing and alignment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The parameter ``level`` has been added to the ``reindex`` and ``align`` methods
+of pandas objects. This is useful to broadcast values across a level. For
+instance:
+
+.. ipython:: python
+
+ midx = MultiIndex(levels=[['zero', 'one'], ['x','y']],
+ labels=[[1,1,0,0],[1,0,1,0]])
+ df = DataFrame(randn(4,2), index=midx)
+ df
+ df2 = df.mean(level=0)
+ df2
+ df2.reindex(df.index, level=0)
+
+ # aligning
+ df_aligned, df2_aligned = df.align(df2, level=0)
+ df_aligned
+ df2_aligned
+
+
+Swapping levels with :meth:`~pandas.MultiIndex.swaplevel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``swaplevel`` function can switch the order of two levels:
+
+.. ipython:: python
+
+ df[:5]
+ df[:5].swaplevel(0, 1, axis=0)
+
+.. _advanced.reorderlevels:
+
+Reordering levels with :meth:`~pandas.MultiIndex.reorder_levels`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``reorder_levels`` function generalizes the ``swaplevel`` function,
+allowing you to permute the hierarchical index levels in one step:
+
+.. ipython:: python
+
+ df[:5].reorder_levels([1,0], axis=0)
+
+The need for sortedness with :class:`~pandas.MultiIndex`
+--------------------------------------------------------
+
+**Caveat emptor**: the present implementation of ``MultiIndex`` requires that
+the labels be sorted for some of the slicing / indexing routines to work
+correctly. You can think about breaking the axis into unique groups, where at
+the hierarchical level of interest, each distinct group shares a label, but no
+two have the same label. However, the ``MultiIndex`` does not enforce this:
+**you are responsible for ensuring that things are properly sorted**. There is
+an important new method ``sortlevel`` to sort an axis within a ``MultiIndex``
+so that its labels are grouped and sorted by the original ordering of the
+associated factor at that level. Note that this does not necessarily mean the
+labels will be sorted lexicographically!
+
+.. ipython:: python
+
+ import random; random.shuffle(tuples)
+ s = Series(randn(8), index=MultiIndex.from_tuples(tuples))
+ s
+ s.sortlevel(0)
+ s.sortlevel(1)
+
+.. _advanced.sortlevel_byname:
+
+Note, you may also pass a level name to ``sortlevel`` if the MultiIndex levels
+are named.
+
+.. ipython:: python
+
+ s.index.set_names(['L1', 'L2'], inplace=True)
+ s.sortlevel(level='L1')
+ s.sortlevel(level='L2')
+
+Some indexing will work even if the data are not sorted, but will be rather
+inefficient and will also return a copy of the data rather than a view:
+
+.. ipython:: python
+
+ s['qux']
+ s.sortlevel(1)['qux']
+
+On higher dimensional objects, you can sort any of the other axes by level if
+they have a MultiIndex:
+
+.. ipython:: python
+
+ df.T.sortlevel(1, axis=1)
+
+The ``MultiIndex`` object has code to **explicity check the sort depth**. Thus,
+if you try to index at a depth at which the index is not sorted, it will raise
+an exception. Here is a concrete example to illustrate this:
+
+.. ipython:: python
+
+ tuples = [('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]
+ idx = MultiIndex.from_tuples(tuples)
+ idx.lexsort_depth
+
+ reordered = idx[[1, 0, 3, 2]]
+ reordered.lexsort_depth
+
+ s = Series(randn(4), index=reordered)
+ s.ix['a':'a']
+
+However:
+
+::
+
+ >>> s.ix[('a', 'b'):('b', 'a')]
+ Traceback (most recent call last)
+ ...
+ KeyError: Key length (3) was greater than MultiIndex lexsort depth (2)
+
+
+Take Methods
+------------
+
+.. _advanced.take:
+
+Similar to numpy ndarrays, pandas Index, Series, and DataFrame also provides
+the ``take`` method that retrieves elements along a given axis at the given
+indices. The given indices must be either a list or an ndarray of integer
+index positions. ``take`` will also accept negative integers as relative positions to the end of the object.
+
+.. ipython:: python
+
+ index = Index(randint(0, 1000, 10))
+ index
+
+ positions = [0, 9, 3]
+
+ index[positions]
+ index.take(positions)
+
+ ser = Series(randn(10))
+
+ ser.iloc[positions]
+ ser.take(positions)
+
+For DataFrames, the given indices should be a 1d list or ndarray that specifies
+row or column positions.
+
+.. ipython:: python
+
+ frm = DataFrame(randn(5, 3))
+
+ frm.take([1, 4, 3])
+
+ frm.take([0, 2], axis=1)
+
+It is important to note that the ``take`` method on pandas objects are not
+intended to work on boolean indices and may return unexpected results.
+
+.. ipython:: python
+
+ arr = randn(10)
+ arr.take([False, False, True, True])
+ arr[[0, 1]]
+
+ ser = Series(randn(10))
+ ser.take([False, False, True, True])
+ ser.ix[[0, 1]]
+
+Finally, as a small note on performance, because the ``take`` method handles
+a narrower range of inputs, it can offer performance that is a good deal
+faster than fancy indexing.
+
+.. ipython::
+
+ arr = randn(10000, 5)
+ indexer = np.arange(10000)
+ random.shuffle(indexer)
+
+ timeit arr[indexer]
+ timeit arr.take(indexer, axis=0)
+
+ ser = Series(arr[:, 0])
+ timeit ser.ix[indexer]
+ timeit ser.take(indexer)
+
+.. _indexing.float64index:
+
+Float64Index
+------------
+
+.. note::
+
+ As of 0.14.0, ``Float64Index`` is backed by a native ``float64`` dtype
+ array. Prior to 0.14.0, ``Float64Index`` was backed by an ``object`` dtype
+ array. Using a ``float64`` dtype in the backend speeds up arithmetic
+ operations by about 30x and boolean indexing operations on the
+ ``Float64Index`` itself are about 2x as fast.
+
+
+.. versionadded:: 0.13.0
+
+By default a ``Float64Index`` will be automatically created when passing floating, or mixed-integer-floating values in index creation.
+This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the
+same.
+
+.. ipython:: python
+
+ indexf = Index([1.5, 2, 3, 4.5, 5])
+ indexf
+ sf = Series(range(5),index=indexf)
+ sf
+
+Scalar selection for ``[],.ix,.loc`` will always be label based. An integer will match an equal float index (e.g. ``3`` is equivalent to ``3.0``)
+
+.. ipython:: python
+
+ sf[3]
+ sf[3.0]
+ sf.ix[3]
+ sf.ix[3.0]
+ sf.loc[3]
+ sf.loc[3.0]
+
+The only positional indexing is via ``iloc``
+
+.. ipython:: python
+
+ sf.iloc[3]
+
+A scalar index that is not found will raise ``KeyError``
+
+Slicing is ALWAYS on the values of the index, for ``[],ix,loc`` and ALWAYS positional with ``iloc``
+
+.. ipython:: python
+
+ sf[2:4]
+ sf.ix[2:4]
+ sf.loc[2:4]
+ sf.iloc[2:4]
+
+In float indexes, slicing using floats is allowed
+
+.. ipython:: python
+
+ sf[2.1:4.6]
+ sf.loc[2.1:4.6]
+
+In non-float indexes, slicing using floats will raise a ``TypeError``
+
+.. code-block:: python
+
+ In [1]: Series(range(5))[3.5]
+ TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index)
+
+ In [1]: Series(range(5))[3.5:4.5]
+ TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index)
+
+Using a scalar float indexer will be deprecated in a future version, but is allowed for now.
+
+.. code-block:: python
+
+ In [3]: Series(range(5))[3.0]
+ Out[3]: 3
+
+Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat
+irregular timedelta-like indexing scheme, but the data is recorded as floats. This could for
+example be millisecond offsets.
+
+.. ipython:: python
+
+ dfir = concat([DataFrame(randn(5,2),
+ index=np.arange(5) * 250.0,
+ columns=list('AB')),
+ DataFrame(randn(6,2),
+ index=np.arange(4,10) * 250.1,
+ columns=list('AB'))])
+ dfir
+
+Selection operations then will always work on a value basis, for all selection operators.
+
+.. ipython:: python
+
+ dfir[0:1000.4]
+ dfir.loc[0:1001,'A']
+ dfir.loc[1000.4]
+
+You could then easily pick out the first 1 second (1000 ms) of data then.
+
+.. ipython:: python
+
+ dfir[0:1000]
+
+Of course if you need integer based selection, then use ``iloc``
+
+.. ipython:: python
+
+ dfir.iloc[0:5]
+
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 81c2dfd4311f9..884976b55d6d1 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -410,7 +410,7 @@ values:
Here is a quick reference summary table of common functions. Each also takes an
optional ``level`` parameter which applies only if the object has a
-:ref:`hierarchical index<indexing.hierarchical>`.
+:ref:`hierarchical index<advanced.hierarchical>`.
.. csv-table::
:header: "Function", "Description"
@@ -822,7 +822,7 @@ DataFrame's index.
.. seealso::
- :ref:`Advanced indexing <indexing.advanced>` is an even more concise way of
+ :ref:`MultiIndex / Advanced Indexing <advanced>` is an even more concise way of
doing reindexing.
.. note::
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 805316d199fc6..243d1c02d1a65 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -86,7 +86,7 @@ The :ref:`indexing <indexing>` docs.
MultiIndexing
-------------
-The :ref:`multindexing <indexing.hierarchical>` docs.
+The :ref:`multiindexing <advanced.hierarchical>` docs.
`Creating a multi-index from a labeled frame
<http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 928de285982cf..44321375d31a2 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -828,7 +828,7 @@ Conversion to DataFrame
~~~~~~~~~~~~~~~~~~~~~~~
A Panel can be represented in 2D form as a hierarchically indexed
-DataFrame. See the section :ref:`hierarchical indexing <indexing.hierarchical>`
+DataFrame. See the section :ref:`hierarchical indexing <advanced.hierarchical>`
for more on this. To convert a Panel to a DataFrame, use the ``to_frame``
method:
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index fb1004edca785..1b21c5d7291e5 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -233,7 +233,7 @@ however pass ``sort=False`` for potential speedups:
GroupBy with MultiIndex
~~~~~~~~~~~~~~~~~~~~~~~
-With :ref:`hierarchically-indexed data <indexing.hierarchical>`, it's quite
+With :ref:`hierarchically-indexed data <advanced.hierarchical>`, it's quite
natural to group by one of the levels of the hierarchy.
.. ipython:: python
@@ -358,7 +358,7 @@ An obvious one is aggregation via the ``aggregate`` or equivalently ``agg`` meth
As you can see, the result of the aggregation will have the group names as the
new index along the grouped axis. In the case of multiple keys, the result is a
-:ref:`MultiIndex <indexing.hierarchical>` by default, though this can be
+:ref:`MultiIndex <advanced.hierarchical>` by default, though this can be
changed by using the ``as_index`` option:
.. ipython:: python
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 4e1d2b471d1c0..a845e31d95e90 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -124,6 +124,7 @@ See the package overview for more detail about what's in the library.
basics
options
indexing
+ advanced
computation
missing_data
groupby
@@ -148,5 +149,6 @@ See the package overview for more detail about what's in the library.
{% endif -%}
{%if not single -%}
contributing
+ internals
release
{% endif -%}
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 4bde90a402456..c458dac22acca 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -58,10 +58,12 @@ indexing.
but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be
a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`)
+See the :ref:`MultiIndex / Advanced Indexing <advanced>` for ``MultiIndex`` and more advanced indexing documentation.
+
See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
-Different Choices for Indexing (``loc``, ``iloc``, and ``ix``)
---------------------------------------------------------------
+Different Choices for Indexing
+------------------------------
.. versionadded:: 0.11.0
@@ -102,9 +104,9 @@ of multi-axis indexing.
whether the slice is interpreted as position based or label based, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
- See more at :ref:`Advanced Indexing <indexing.advanced>`, :ref:`Advanced
- Hierarchical <indexing.advanced_hierarchical>` and :ref:`Fallback Indexing
- <indexing.fallback>`
+ See more at :ref:`Advanced Indexing <advanced>`, :ref:`Advanced
+ Hierarchical <advanced.advanced_hierarchical>` and :ref:`Fallback Indexing
+ <advanced.fallback>`
Getting values from an object with multi-axes selection uses the following
notation (using ``.loc`` as an example, but applies to ``.iloc`` and ``.ix`` as
@@ -579,7 +581,7 @@ more complex criteria:
df2[criterion & (df2['b'] == 'x')]
Note, with the choice methods :ref:`Selection by Label <indexing.label>`, :ref:`Selection by Position <indexing.integer>`,
-and :ref:`Advanced Indexing <indexing.advanced>` you may select along more than one axis using boolean vectors combined with other indexing expressions.
+and :ref:`Advanced Indexing <advanced>` you may select along more than one axis using boolean vectors combined with other indexing expressions.
.. ipython:: python
@@ -1078,71 +1080,6 @@ floating point values generated using ``numpy.random.randn()``.
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
df2 = df.copy()
-Take Methods
-------------
-
-.. _indexing.take:
-
-Similar to numpy ndarrays, pandas Index, Series, and DataFrame also provides
-the ``take`` method that retrieves elements along a given axis at the given
-indices. The given indices must be either a list or an ndarray of integer
-index positions. ``take`` will also accept negative integers as relative positions to the end of the object.
-
-.. ipython:: python
-
- index = Index(randint(0, 1000, 10))
- index
-
- positions = [0, 9, 3]
-
- index[positions]
- index.take(positions)
-
- ser = Series(randn(10))
-
- ser.ix[positions]
- ser.take(positions)
-
-For DataFrames, the given indices should be a 1d list or ndarray that specifies
-row or column positions.
-
-.. ipython:: python
-
- frm = DataFrame(randn(5, 3))
-
- frm.take([1, 4, 3])
-
- frm.take([0, 2], axis=1)
-
-It is important to note that the ``take`` method on pandas objects are not
-intended to work on boolean indices and may return unexpected results.
-
-.. ipython:: python
-
- arr = randn(10)
- arr.take([False, False, True, True])
- arr[[0, 1]]
-
- ser = Series(randn(10))
- ser.take([False, False, True, True])
- ser.ix[[0, 1]]
-
-Finally, as a small note on performance, because the ``take`` method handles
-a narrower range of inputs, it can offer performance that is a good deal
-faster than fancy indexing.
-
-.. ipython::
-
- arr = randn(10000, 5)
- indexer = np.arange(10000)
- random.shuffle(indexer)
-
- timeit arr[indexer]
- timeit arr.take(indexer, axis=0)
-
- ser = Series(arr[:, 0])
- timeit ser.ix[indexer]
- timeit ser.take(indexer)
Duplicate Data
--------------
@@ -1183,229 +1120,228 @@ default value.
s.get('a') # equivalent to s['a']
s.get('x', default=-1)
-.. _indexing.advanced:
+The :meth:`~pandas.DataFrame.select` Method
+-------------------------------------------
-Advanced Indexing with ``.ix``
-------------------------------
+Another way to extract slices from an object is with the ``select`` method of
+Series, DataFrame, and Panel. This method should be used only when there is no
+more direct way. ``select`` takes a function which operates on labels along
+``axis`` and returns a boolean. For instance:
-.. note::
+.. ipython:: python
+
+ df.select(lambda x: x == 'A', axis=1)
- The recent addition of ``.loc`` and ``.iloc`` have enabled users to be quite
- explicit about indexing choices. ``.ix`` allows a great flexibility to
- specify indexing locations by *label* and/or *integer position*. pandas will
- attempt to use any passed *integer* as *label* locations first (like what
- ``.loc`` would do, then to fall back on *positional* indexing, like what
- ``.iloc`` would do). See :ref:`Fallback Indexing <indexing.fallback>` for
- an example.
+The :meth:`~pandas.DataFrame.lookup` Method
+-------------------------------------------
-The syntax of using ``.ix`` is identical to ``.loc``, in :ref:`Selection by
-Label <indexing.label>`, and ``.iloc`` in :ref:`Selection by Position <indexing.integer>`.
+Sometimes you want to extract a set of values given a sequence of row labels
+and column labels, and the ``lookup`` method allows for this and returns a
+numpy array. For instance,
-The ``.ix`` attribute takes the following inputs:
+.. ipython:: python
-- An integer or single label, e.g. ``5`` or ``'a'``
-- A list or array of labels ``['a', 'b', 'c']`` or integers ``[4, 3, 0]``
-- A slice object with ints ``1:7`` or labels ``'a':'f'``
-- A boolean array
+ dflookup = DataFrame(np.random.rand(20,4), columns = ['A','B','C','D'])
+ dflookup.lookup(list(range(0,10,2)), ['B','C','A','B','D'])
-We'll illustrate all of these methods. First, note that this provides a concise
-way of reindexing on multiple axes at once:
+.. _indexing.class:
-.. ipython:: python
+Index objects
+-------------
- subindex = dates[[3,4,5]]
- df.reindex(index=subindex, columns=['C', 'B'])
- df.ix[subindex, ['C', 'B']]
+The pandas :class:`~pandas.Index` class and its subclasses can be viewed as
+implementing an *ordered multiset*. Duplicates are allowed. However, if you try
+to convert an :class:`~pandas.Index` object with duplicate entries into a
+``set``, an exception will be raised.
-Assignment / setting values is possible when using ``ix``:
+:class:`~pandas.Index` also provides the infrastructure necessary for
+lookups, data alignment, and reindexing. The easiest way to create an
+:class:`~pandas.Index` directly is to pass a ``list`` or other sequence to
+:class:`~pandas.Index`:
.. ipython:: python
- df2 = df.copy()
- df2.ix[subindex, ['C', 'B']] = 0
- df2
+ index = Index(['e', 'd', 'a', 'b'])
+ index
+ 'd' in index
+
+You can also pass a ``name`` to be stored in the index:
-Indexing with an array of integers can also be done:
.. ipython:: python
- df.ix[[4,3,1]]
- df.ix[dates[[4,3,1]]]
+ index = Index(['e', 'd', 'a', 'b'], name='something')
+ index.name
-**Slicing** has standard Python semantics for integer slices:
+The name, if set, will be shown in the console display:
.. ipython:: python
- df.ix[1:7, :2]
+ index = Index(list(range(5)), name='rows')
+ columns = Index(['A', 'B', 'C'], name='cols')
+ df = DataFrame(np.random.randn(5, 3), index=index, columns=columns)
+ df
+ df['A']
-Slicing with labels is semantically slightly different because the slice start
-and stop are **inclusive** in the label-based case:
-.. ipython:: python
+Set operations on Index objects
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- date1, date2 = dates[[2, 4]]
- print(date1, date2)
- df.ix[date1:date2]
- df['A'].ix[date1:date2]
+.. _indexing.set_ops:
-Getting and setting rows in a DataFrame, especially by their location, is much
-easier:
+The three main operations are ``union (|)``, ``intersection (&)``, and ``diff
+(-)``. These can be directly called as instance methods or used via overloaded
+operators:
.. ipython:: python
- df2 = df[:5].copy()
- df2.ix[3]
- df2.ix[3] = np.arange(len(df2.columns))
- df2
+ a = Index(['c', 'b', 'a'])
+ b = Index(['c', 'e', 'd'])
+ a.union(b)
+ a | b
+ a & b
+ a - b
-Column or row selection can be combined as you would expect with arrays of
-labels or even boolean vectors:
+Also available is the ``sym_diff (^)`` operation, which returns elements
+that appear in either ``idx1`` or ``idx2`` but not both. This is
+equivalent to the Index created by ``(idx1 - idx2) + (idx2 - idx1)``,
+with duplicates dropped.
.. ipython:: python
- df.ix[df['A'] > 0, 'B']
- df.ix[date1:date2, 'B']
- df.ix[date1, 'B']
-
-Slicing with labels is closely related to the ``truncate`` method which does
-precisely ``.ix[start:stop]`` but returns a copy (for legacy reasons).
+ idx1 = Index([1, 2, 3, 4])
+ idx2 = Index([2, 3, 4, 5])
+ idx1.sym_diff(idx2)
+ idx1 ^ idx2
-The :meth:`~pandas.DataFrame.select` Method
--------------------------------------------
+Setting index metadata (``name(s)``, ``levels``, ``labels``)
+------------------------------------------------------------
-Another way to extract slices from an object is with the ``select`` method of
-Series, DataFrame, and Panel. This method should be used only when there is no
-more direct way. ``select`` takes a function which operates on labels along
-``axis`` and returns a boolean. For instance:
+.. versionadded:: 0.13.0
-.. ipython:: python
+.. _indexing.set_metadata:
- df.select(lambda x: x == 'A', axis=1)
+Indexes are "mostly immutable", but it is possible to set and change their
+metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
+``labels``).
-The :meth:`~pandas.DataFrame.lookup` Method
--------------------------------------------
+You can use the ``rename``, ``set_names``, ``set_levels``, and ``set_labels``
+to set these attributes directly. They default to returning a copy; however,
+you can specify ``inplace=True`` to have the data change in place.
-Sometimes you want to extract a set of values given a sequence of row labels
-and column labels, and the ``lookup`` method allows for this and returns a
-numpy array. For instance,
+See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
.. ipython:: python
- dflookup = DataFrame(np.random.rand(20,4), columns = ['A','B','C','D'])
- dflookup.lookup(list(range(0,10,2)), ['B','C','A','B','D'])
+ ind = Index([1, 2, 3])
+ ind.rename("apple")
+ ind
+ ind.set_names(["apple"], inplace=True)
+ ind.name = "bob"
+ ind
-.. _indexing.float64index:
+.. versionadded:: 0.15.0
-Float64Index
-------------
+``set_names``, ``set_levels``, and ``set_labels`` also take an optional
+``level`` argument.
-.. note::
+.. ipython:: python
- As of 0.14.0, ``Float64Index`` is backed by a native ``float64`` dtype
- array. Prior to 0.14.0, ``Float64Index`` was backed by an ``object`` dtype
- array. Using a ``float64`` dtype in the backend speeds up arithmetic
- operations by about 30x and boolean indexing operations on the
- ``Float64Index`` itself are about 2x as fast.
+ index = MultiIndex.from_product([range(3), ['one', 'two']], names=['first', 'second'])
+ index
+ index.levels[1]
+ index.set_levels(["a", "b"], level=1)
-.. versionadded:: 0.13.0
+Set / Reset Index
+-----------------
-By default a ``Float64Index`` will be automatically created when passing floating, or mixed-integer-floating values in index creation.
-This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the
-same.
+Occasionally you will load or create a data set into a DataFrame and want to
+add an index after you've already done so. There are a couple of different
+ways.
-.. ipython:: python
+Set an index
+~~~~~~~~~~~~
- indexf = Index([1.5, 2, 3, 4.5, 5])
- indexf
- sf = Series(range(5),index=indexf)
- sf
+.. _indexing.set_index:
-Scalar selection for ``[],.ix,.loc`` will always be label based. An integer will match an equal float index (e.g. ``3`` is equivalent to ``3.0``)
+DataFrame has a ``set_index`` method which takes a column name (for a regular
+``Index``) or a list of column names (for a ``MultiIndex``), to create a new,
+indexed DataFrame:
.. ipython:: python
+ :suppress:
- sf[3]
- sf[3.0]
- sf.ix[3]
- sf.ix[3.0]
- sf.loc[3]
- sf.loc[3.0]
-
-The only positional indexing is via ``iloc``
+ data = DataFrame({'a' : ['bar', 'bar', 'foo', 'foo'],
+ 'b' : ['one', 'two', 'one', 'two'],
+ 'c' : ['z', 'y', 'x', 'w'],
+ 'd' : [1., 2., 3, 4]})
.. ipython:: python
- sf.iloc[3]
-
-A scalar index that is not found will raise ``KeyError``
+ data
+ indexed1 = data.set_index('c')
+ indexed1
+ indexed2 = data.set_index(['a', 'b'])
+ indexed2
-Slicing is ALWAYS on the values of the index, for ``[],ix,loc`` and ALWAYS positional with ``iloc``
+The ``append`` keyword option allows you to keep the existing index and append
+the given columns to a MultiIndex:
.. ipython:: python
- sf[2:4]
- sf.ix[2:4]
- sf.loc[2:4]
- sf.iloc[2:4]
+ frame = data.set_index('c', drop=False)
+ frame = frame.set_index(['a', 'b'], append=True)
+ frame
-In float indexes, slicing using floats is allowed
+Other options in ``set_index`` allow you to not drop the index columns or to add
+the index in-place (without creating a new object):
.. ipython:: python
- sf[2.1:4.6]
- sf.loc[2.1:4.6]
-
-In non-float indexes, slicing using floats will raise a ``TypeError``
-
-.. code-block:: python
+ data.set_index('c', drop=False)
+ data.set_index(['a', 'b'], inplace=True)
+ data
- In [1]: Series(range(5))[3.5]
- TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index)
+Reset the index
+~~~~~~~~~~~~~~~
- In [1]: Series(range(5))[3.5:4.5]
- TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index)
+As a convenience, there is a new function on DataFrame called ``reset_index``
+which transfers the index values into the DataFrame's columns and sets a simple
+integer index. This is the inverse operation to ``set_index``
-Using a scalar float indexer will be deprecated in a future version, but is allowed for now.
+.. ipython:: python
-.. code-block:: python
+ data
+ data.reset_index()
- In [3]: Series(range(5))[3.0]
- Out[3]: 3
+The output is more similar to a SQL table or a record array. The names for the
+columns derived from the index are the ones stored in the ``names`` attribute.
-Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat
-irregular timedelta-like indexing scheme, but the data is recorded as floats. This could for
-example be millisecond offsets.
+You can use the ``level`` keyword to remove only a portion of the index:
.. ipython:: python
- dfir = concat([DataFrame(randn(5,2),
- index=np.arange(5) * 250.0,
- columns=list('AB')),
- DataFrame(randn(6,2),
- index=np.arange(4,10) * 250.1,
- columns=list('AB'))])
- dfir
-
-Selection operations then will always work on a value basis, for all selection operators.
+ frame
+ frame.reset_index(level=1)
-.. ipython:: python
- dfir[0:1000.4]
- dfir.loc[0:1001,'A']
- dfir.loc[1000.4]
+``reset_index`` takes an optional parameter ``drop`` which if true simply
+discards the index, instead of putting index values in the DataFrame's columns.
-You could then easily pick out the first 1 second (1000 ms) of data then.
+.. note::
-.. ipython:: python
+ The ``reset_index`` method used to be called ``delevel`` which is now
+ deprecated.
- dfir[0:1000]
+Adding an ad hoc index
+~~~~~~~~~~~~~~~~~~~~~~
-Of course if you need integer based selection, then use ``iloc``
+If you create an index yourself, you can just assign it to the ``index`` field:
-.. ipython:: python
+.. code-block:: python
- dfir.iloc[0:5]
+ data.index = index
.. _indexing.view_versus_copy:
@@ -1539,800 +1475,3 @@ This will **not** work at all, and so should be avoided
reported.
-Fallback indexing
------------------
-
-.. _indexing.fallback:
-
-Float indexes should be used only with caution. If you have a float indexed
-``DataFrame`` and try to select using an integer, the row that pandas returns
-might not be what you expect. pandas first attempts to use the *integer*
-as a *label* location, but fails to find a match (because the types
-are not equal). pandas then falls back to back to positional indexing.
-
-.. ipython:: python
-
- df = pd.DataFrame(np.random.randn(4,4),
- columns=list('ABCD'), index=[1.0, 2.0, 3.0, 4.0])
- df
- df.ix[1]
-
-To select the row you do expect, instead use a float label or
-use ``iloc``.
-
-.. ipython:: python
-
- df.ix[1.0]
- df.iloc[0]
-
-Instead of using a float index, it is often better to
-convert to an integer index:
-
-.. ipython:: python
-
- df_new = df.reset_index()
- df_new[df_new['index'] == 1.0]
- # now you can also do "float selection"
- df_new[(df_new['index'] >= 1.0) & (df_new['index'] < 2)]
-
-
-.. _indexing.class:
-
-Index objects
--------------
-
-The pandas :class:`~pandas.Index` class and its subclasses can be viewed as
-implementing an *ordered multiset*. Duplicates are allowed. However, if you try
-to convert an :class:`~pandas.Index` object with duplicate entries into a
-``set``, an exception will be raised.
-
-:class:`~pandas.Index` also provides the infrastructure necessary for
-lookups, data alignment, and reindexing. The easiest way to create an
-:class:`~pandas.Index` directly is to pass a ``list`` or other sequence to
-:class:`~pandas.Index`:
-
-.. ipython:: python
-
- index = Index(['e', 'd', 'a', 'b'])
- index
- 'd' in index
-
-You can also pass a ``name`` to be stored in the index:
-
-
-.. ipython:: python
-
- index = Index(['e', 'd', 'a', 'b'], name='something')
- index.name
-
-Starting with pandas 0.5, the name, if set, will be shown in the console
-display:
-
-.. ipython:: python
-
- index = Index(list(range(5)), name='rows')
- columns = Index(['A', 'B', 'C'], name='cols')
- df = DataFrame(np.random.randn(5, 3), index=index, columns=columns)
- df
- df['A']
-
-.. _indexing.setops:
-
-Set operations on Index objects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. warning::
-
- In 0.15.0. the set operations ``+`` and ``-`` were deprecated in order to provide these for numeric type operations on certain
- index types. ``+`` can be replace by ``.union()`` or ``|``, and ``-`` by ``.difference()``.
-
-.. _indexing.set_ops:
-
-The two main operations are ``union (|)``, ``intersection (&)``
-These can be directly called as instance methods or used via overloaded
-operators. Difference is provided via the ``.difference()`` method.
-
-.. ipython:: python
-
- a = Index(['c', 'b', 'a'])
- b = Index(['c', 'e', 'd'])
- a | b
- a & b
- a.difference(b)
-
-Also available is the ``sym_diff (^)`` operation, which returns elements
-that appear in either ``idx1`` or ``idx2`` but not both. This is
-equivalent to the Index created by ``(idx1.difference(idx2)).union(idx2.difference(idx1))``,
-with duplicates dropped.
-
-.. ipython:: python
-
- idx1 = Index([1, 2, 3, 4])
- idx2 = Index([2, 3, 4, 5])
- idx1.sym_diff(idx2)
- idx1 ^ idx2
-
-.. _indexing.hierarchical:
-
-Hierarchical indexing (MultiIndex)
-----------------------------------
-
-Hierarchical indexing (also referred to as "multi-level" indexing) is brand new
-in the pandas 0.4 release. It is very exciting as it opens the door to some
-quite sophisticated data analysis and manipulation, especially for working with
-higher dimensional data. In essence, it enables you to store and manipulate
-data with an arbitrary number of dimensions in lower dimensional data
-structures like Series (1d) and DataFrame (2d).
-
-In this section, we will show what exactly we mean by "hierarchical" indexing
-and how it integrates with the all of the pandas indexing functionality
-described above and in prior sections. Later, when discussing :ref:`group by
-<groupby>` and :ref:`pivoting and reshaping data <reshaping>`, we'll show
-non-trivial applications to illustrate how it aids in structuring data for
-analysis.
-
-See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies
-
-Creating a MultiIndex (hierarchical index) object
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``MultiIndex`` object is the hierarchical analogue of the standard
-``Index`` object which typically stores the axis labels in pandas objects. You
-can think of ``MultiIndex`` an array of tuples where each tuple is unique. A
-``MultiIndex`` can be created from a list of arrays (using
-``MultiIndex.from_arrays``), an array of tuples (using
-``MultiIndex.from_tuples``), or a crossed set of iterables (using
-``MultiIndex.from_product``). The ``Index`` constructor will attempt to return
-a ``MultiIndex`` when it is passed a list of tuples. The following examples
-demo different ways to initialize MultiIndexes.
-
-
-.. ipython:: python
-
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
- tuples = list(zip(*arrays))
- tuples
-
- index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
- index
-
- s = Series(randn(8), index=index)
- s
-
-When you want every pairing of the elements in two iterables, it can be easier
-to use the ``MultiIndex.from_product`` function:
-
-.. ipython:: python
-
- iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']]
- MultiIndex.from_product(iterables, names=['first', 'second'])
-
-As a convenience, you can pass a list of arrays directly into Series or
-DataFrame to construct a MultiIndex automatically:
-
-.. ipython:: python
-
- arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'])
- ,
- np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])
- ]
- s = Series(randn(8), index=arrays)
- s
- df = DataFrame(randn(8, 4), index=arrays)
- df
-
-All of the ``MultiIndex`` constructors accept a ``names`` argument which stores
-string names for the levels themselves. If no names are provided, ``None`` will
-be assigned:
-
-.. ipython:: python
-
- df.index.names
-
-This index can back any axis of a pandas object, and the number of **levels**
-of the index is up to you:
-
-.. ipython:: python
-
- df = DataFrame(randn(3, 8), index=['A', 'B', 'C'], columns=index)
- df
- DataFrame(randn(6, 6), index=index[:6], columns=index[:6])
-
-We've "sparsified" the higher levels of the indexes to make the console output a
-bit easier on the eyes.
-
-It's worth keeping in mind that there's nothing preventing you from using
-tuples as atomic labels on an axis:
-
-.. ipython:: python
-
- Series(randn(8), index=tuples)
-
-The reason that the ``MultiIndex`` matters is that it can allow you to do
-grouping, selection, and reshaping operations as we will describe below and in
-subsequent areas of the documentation. As you will see in later sections, you
-can find yourself working with hierarchically-indexed data without creating a
-``MultiIndex`` explicitly yourself. However, when loading data from a file, you
-may wish to generate your own ``MultiIndex`` when preparing the data set.
-
-Note that how the index is displayed by be controlled using the
-``multi_sparse`` option in ``pandas.set_printoptions``:
-
-.. ipython:: python
-
- pd.set_option('display.multi_sparse', False)
- df
- pd.set_option('display.multi_sparse', True)
-
-Reconstructing the level labels
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _indexing.get_level_values:
-
-The method ``get_level_values`` will return a vector of the labels for each
-location at a particular level:
-
-.. ipython:: python
-
- index.get_level_values(0)
- index.get_level_values('second')
-
-
-Basic indexing on axis with MultiIndex
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-One of the important features of hierarchical indexing is that you can select
-data by a "partial" label identifying a subgroup in the data. **Partial**
-selection "drops" levels of the hierarchical index in the result in a
-completely analogous way to selecting a column in a regular DataFrame:
-
-.. ipython:: python
-
- df['bar']
- df['bar', 'one']
- df['bar']['one']
- s['qux']
-
-See :ref:`Cross-section with hierarchical index <indexing.xs>` for how to select
-on a deeper level.
-
-
-Data alignment and using ``reindex``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Operations between differently-indexed objects having ``MultiIndex`` on the
-axes will work as you expect; data alignment will work the same as an Index of
-tuples:
-
-.. ipython:: python
-
- s + s[:-2]
- s + s[::2]
-
-``reindex`` can be called with another ``MultiIndex`` or even a list or array
-of tuples:
-
-.. ipython:: python
-
- s.reindex(index[:3])
- s.reindex([('foo', 'two'), ('bar', 'one'), ('qux', 'one'), ('baz', 'one')])
-
-.. _indexing.advanced_hierarchical:
-
-Advanced indexing with hierarchical index
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Syntactically integrating ``MultiIndex`` in advanced indexing with ``.loc/.ix`` is a
-bit challenging, but we've made every effort to do so. for example the
-following works as you would expect:
-
-.. ipython:: python
-
- df = df.T
- df
- df.loc['bar']
- df.loc['bar', 'two']
-
-"Partial" slicing also works quite nicely.
-
-.. ipython:: python
-
- df.loc['baz':'foo']
-
-You can slice with a 'range' of values, by providing a slice of tuples.
-
-.. ipython:: python
-
- df.loc[('baz', 'two'):('qux', 'one')]
- df.loc[('baz', 'two'):'foo']
-
-Passing a list of labels or tuples works similar to reindexing:
-
-.. ipython:: python
-
- df.ix[[('bar', 'two'), ('qux', 'one')]]
-
-.. _indexing.mi_slicers:
-
-Multiindexing using slicers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 0.14.0
-
-In 0.14.0 we added a new way to slice multi-indexed objects.
-You can slice a multi-index by providing multiple indexers.
-
-You can provide any of the selectors as if you are indexing by label, see :ref:`Selection by Label <indexing.label>`,
-including slices, lists of labels, labels, and boolean indexers.
-
-You can use ``slice(None)`` to select all the contents of *that* level. You do not need to specify all the
-*deeper* levels, they will be implied as ``slice(None)``.
-
-As usual, **both sides** of the slicers are included as this is label indexing.
-
-.. warning::
-
- You should specify all axes in the ``.loc`` specifier, meaning the indexer for the **index** and
- for the **columns**. Their are some ambiguous cases where the passed indexer could be mis-interpreted
- as indexing *both* axes, rather than into say the MuliIndex for the rows.
-
- You should do this:
-
- .. code-block:: python
-
- df.loc[(slice('A1','A3'),.....),:]
-
- rather than this:
-
- .. code-block:: python
-
- df.loc[(slice('A1','A3'),.....)]
-
-.. warning::
-
- You will need to make sure that the selection axes are fully lexsorted!
-
-.. ipython:: python
-
- def mklbl(prefix,n):
- return ["%s%s" % (prefix,i) for i in range(n)]
-
- miindex = MultiIndex.from_product([mklbl('A',4),
- mklbl('B',2),
- mklbl('C',4),
- mklbl('D',2)])
- micolumns = MultiIndex.from_tuples([('a','foo'),('a','bar'),
- ('b','foo'),('b','bah')],
- names=['lvl0', 'lvl1'])
- dfmi = DataFrame(np.arange(len(miindex)*len(micolumns)).reshape((len(miindex),len(micolumns))),
- index=miindex,
- columns=micolumns).sortlevel().sortlevel(axis=1)
- dfmi
-
-Basic multi-index slicing using slices, lists, and labels.
-
-.. ipython:: python
-
- dfmi.loc[(slice('A1','A3'),slice(None), ['C1','C3']),:]
-
-You can use a ``pd.IndexSlice`` to shortcut the creation of these slices
-
-.. ipython:: python
-
- idx = pd.IndexSlice
- dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
-
-It is possible to perform quite complicated selections using this method on multiple
-axes at the same time.
-
-.. ipython:: python
-
- dfmi.loc['A1',(slice(None),'foo')]
- dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
-
-Using a boolean indexer you can provide selection related to the *values*.
-
-.. ipython:: python
-
- mask = dfmi[('a','foo')]>200
- dfmi.loc[idx[mask,:,['C1','C3']],idx[:,'foo']]
-
-You can also specify the ``axis`` argument to ``.loc`` to interpret the passed
-slicers on a single axis.
-
-.. ipython:: python
-
- dfmi.loc(axis=0)[:,:,['C1','C3']]
-
-Furthermore you can *set* the values using these methods
-
-.. ipython:: python
-
- df2 = dfmi.copy()
- df2.loc(axis=0)[:,:,['C1','C3']] = -10
- df2
-
-You can use a right-hand-side of an alignable object as well.
-
-.. ipython:: python
-
- df2 = dfmi.copy()
- df2.loc[idx[:,:,['C1','C3']],:] = df2*1000
- df2
-
-.. _indexing.xs:
-
-Cross-section with hierarchical index
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``xs`` method of ``DataFrame`` additionally takes a level argument to make
-selecting data at a particular level of a MultiIndex easier.
-
-.. ipython:: python
-
- df.xs('one', level='second')
-
-.. ipython:: python
-
- # using the slicers (new in 0.14.0)
- df.loc[(slice(None),'one'),:]
-
-You can also select on the columns with :meth:`~pandas.MultiIndex.xs`, by
-providing the axis argument
-
-.. ipython:: python
-
- df = df.T
- df.xs('one', level='second', axis=1)
-
-.. ipython:: python
-
- # using the slicers (new in 0.14.0)
- df.loc[:,(slice(None),'one')]
-
-:meth:`~pandas.MultiIndex.xs` also allows selection with multiple keys
-
-.. ipython:: python
-
- df.xs(('one', 'bar'), level=('second', 'first'), axis=1)
-
-.. ipython:: python
-
- # using the slicers (new in 0.14.0)
- df.loc[:,('bar','one')]
-
-.. versionadded:: 0.13.0
-
-You can pass ``drop_level=False`` to :meth:`~pandas.MultiIndex.xs` to retain
-the level that was selected
-
-.. ipython:: python
-
- df.xs('one', level='second', axis=1, drop_level=False)
-
-versus the result with ``drop_level=True`` (the default value)
-
-.. ipython:: python
-
- df.xs('one', level='second', axis=1, drop_level=True)
-
-.. ipython:: python
- :suppress:
-
- df = df.T
-
-.. _indexing.advanced_reindex:
-
-Advanced reindexing and alignment with hierarchical index
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The parameter ``level`` has been added to the ``reindex`` and ``align`` methods
-of pandas objects. This is useful to broadcast values across a level. For
-instance:
-
-.. ipython:: python
-
- midx = MultiIndex(levels=[['zero', 'one'], ['x','y']],
- labels=[[1,1,0,0],[1,0,1,0]])
- df = DataFrame(randn(4,2), index=midx)
- print(df)
- df2 = df.mean(level=0)
- print(df2)
- print(df2.reindex(df.index, level=0))
- df_aligned, df2_aligned = df.align(df2, level=0)
- print(df_aligned)
- print(df2_aligned)
-
-
-The need for sortedness with :class:`~pandas.MultiIndex`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**Caveat emptor**: the present implementation of ``MultiIndex`` requires that
-the labels be sorted for some of the slicing / indexing routines to work
-correctly. You can think about breaking the axis into unique groups, where at
-the hierarchical level of interest, each distinct group shares a label, but no
-two have the same label. However, the ``MultiIndex`` does not enforce this:
-**you are responsible for ensuring that things are properly sorted**. There is
-an important new method ``sortlevel`` to sort an axis within a ``MultiIndex``
-so that its labels are grouped and sorted by the original ordering of the
-associated factor at that level. Note that this does not necessarily mean the
-labels will be sorted lexicographically!
-
-.. ipython:: python
-
- import random; random.shuffle(tuples)
- s = Series(randn(8), index=MultiIndex.from_tuples(tuples))
- s
- s.sortlevel(0)
- s.sortlevel(1)
-
-.. _indexing.sortlevel_byname:
-
-Note, you may also pass a level name to ``sortlevel`` if the MultiIndex levels
-are named.
-
-.. ipython:: python
-
- s.index.set_names(['L1', 'L2'], inplace=True)
- s.sortlevel(level='L1')
- s.sortlevel(level='L2')
-
-Some indexing will work even if the data are not sorted, but will be rather
-inefficient and will also return a copy of the data rather than a view:
-
-.. ipython:: python
-
- s['qux']
- s.sortlevel(1)['qux']
-
-On higher dimensional objects, you can sort any of the other axes by level if
-they have a MultiIndex:
-
-.. ipython:: python
-
- df.T.sortlevel(1, axis=1)
-
-The ``MultiIndex`` object has code to **explicity check the sort depth**. Thus,
-if you try to index at a depth at which the index is not sorted, it will raise
-an exception. Here is a concrete example to illustrate this:
-
-.. ipython:: python
-
- tuples = [('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]
- idx = MultiIndex.from_tuples(tuples)
- idx.lexsort_depth
-
- reordered = idx[[1, 0, 3, 2]]
- reordered.lexsort_depth
-
- s = Series(randn(4), index=reordered)
- s.ix['a':'a']
-
-However:
-
-::
-
- >>> s.ix[('a', 'b'):('b', 'a')]
- Traceback (most recent call last)
- ...
- KeyError: Key length (3) was greater than MultiIndex lexsort depth (2)
-
-Swapping levels with :meth:`~pandas.MultiIndex.swaplevel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``swaplevel`` function can switch the order of two levels:
-
-.. ipython:: python
-
- df[:5]
- df[:5].swaplevel(0, 1, axis=0)
-
-.. _indexing.reorderlevels:
-
-Reordering levels with :meth:`~pandas.MultiIndex.reorder_levels`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``reorder_levels`` function generalizes the ``swaplevel`` function,
-allowing you to permute the hierarchical index levels in one step:
-
-.. ipython:: python
-
- df[:5].reorder_levels([1,0], axis=0)
-
-
-Some gory internal details
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Internally, the ``MultiIndex`` consists of a few things: the **levels**, the
-integer **labels**, and the level **names**:
-
-.. ipython:: python
-
- index
- index.levels
- index.labels
- index.names
-
-You can probably guess that the labels determine which unique element is
-identified with that location at each layer of the index. It's important to
-note that sortedness is determined **solely** from the integer labels and does
-not check (or care) whether the levels themselves are sorted. Fortunately, the
-constructors ``from_tuples`` and ``from_arrays`` ensure that this is true, but
-if you compute the levels and labels yourself, please be careful.
-
-
-Setting index metadata (``name(s)``, ``levels``, ``labels``)
-------------------------------------------------------------
-
-.. versionadded:: 0.13.0
-
-.. _indexing.set_metadata:
-
-Indexes are "mostly immutable", but it is possible to set and change their
-metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
-``labels``).
-
-You can use the ``rename``, ``set_names``, ``set_levels``, and ``set_labels``
-to set these attributes directly. They default to returning a copy; however,
-you can specify ``inplace=True`` to have the data change in place.
-
-.. ipython:: python
-
- ind = Index([1, 2, 3])
- ind.rename("apple")
- ind
- ind.set_names(["apple"], inplace=True)
- ind.name = "bob"
- ind
-
-.. versionadded:: 0.15.0
-
-``set_names``, ``set_levels``, and ``set_labels`` also take an optional
-`level`` argument
-
-.. ipython:: python
-
- index
- index.levels[1]
- index.set_levels(["a", "b"], level=1)
-
-Adding an index to an existing DataFrame
-----------------------------------------
-
-Occasionally you will load or create a data set into a DataFrame and want to
-add an index after you've already done so. There are a couple of different
-ways.
-
-Add an index using DataFrame columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _indexing.set_index:
-
-DataFrame has a ``set_index`` method which takes a column name (for a regular
-``Index``) or a list of column names (for a ``MultiIndex``), to create a new,
-indexed DataFrame:
-
-.. ipython:: python
- :suppress:
-
- data = DataFrame({'a' : ['bar', 'bar', 'foo', 'foo'],
- 'b' : ['one', 'two', 'one', 'two'],
- 'c' : ['z', 'y', 'x', 'w'],
- 'd' : [1., 2., 3, 4]})
-
-.. ipython:: python
-
- data
- indexed1 = data.set_index('c')
- indexed1
- indexed2 = data.set_index(['a', 'b'])
- indexed2
-
-The ``append`` keyword option allow you to keep the existing index and append
-the given columns to a MultiIndex:
-
-.. ipython:: python
-
- frame = data.set_index('c', drop=False)
- frame = frame.set_index(['a', 'b'], append=True)
- frame
-
-Other options in ``set_index`` allow you not drop the index columns or to add
-the index in-place (without creating a new object):
-
-.. ipython:: python
-
- data.set_index('c', drop=False)
- data.set_index(['a', 'b'], inplace=True)
- data
-
-Remove / reset the index, ``reset_index``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As a convenience, there is a new function on DataFrame called ``reset_index``
-which transfers the index values into the DataFrame's columns and sets a simple
-integer index. This is the inverse operation to ``set_index``
-
-.. ipython:: python
-
- data
- data.reset_index()
-
-The output is more similar to a SQL table or a record array. The names for the
-columns derived from the index are the ones stored in the ``names`` attribute.
-
-You can use the ``level`` keyword to remove only a portion of the index:
-
-.. ipython:: python
-
- frame
- frame.reset_index(level=1)
-
-
-``reset_index`` takes an optional parameter ``drop`` which if true simply
-discards the index, instead of putting index values in the DataFrame's columns.
-
-.. note::
-
- The ``reset_index`` method used to be called ``delevel`` which is now
- deprecated.
-
-Adding an ad hoc index
-~~~~~~~~~~~~~~~~~~~~~~
-
-If you create an index yourself, you can just assign it to the ``index`` field:
-
-.. code-block:: python
-
- data.index = index
-
-Indexing internal details
--------------------------
-
-.. note::
-
- The following is largely relevant for those actually working on the pandas
- codebase. The source code is still the best place to look at the specifics
- of how things are implemented.
-
-In pandas there are a few objects implemented which can serve as valid
-containers for the axis labels:
-
- - ``Index``: the generic "ordered set" object, an ndarray of object dtype
- assuming nothing about its contents. The labels must be hashable (and
- likely immutable) and unique. Populates a dict of label to location in
- Cython to do :math:`O(1)` lookups.
- - ``Int64Index``: a version of ``Index`` highly optimized for 64-bit integer
- data, such as time stamps
- - ``MultiIndex``: the standard hierarchical index object
- - ``PeriodIndex``: An Index object with Period elements
- - ``DatetimeIndex``: An Index object with Timestamp elements
- - ``date_range``: fixed frequency date range generated from a time rule or
- DateOffset. An ndarray of Python datetime objects
-
-The motivation for having an ``Index`` class in the first place was to enable
-different implementations of indexing. This means that it's possible for you,
-the user, to implement a custom ``Index`` subclass that may be better suited to
-a particular application than the ones provided in pandas.
-
-From an internal implementation point of view, the relevant methods that an
-``Index`` must define are one or more of the following (depending on how
-incompatible the new object internals are with the ``Index`` functions):
-
- - ``get_loc``: returns an "indexer" (an integer, or in some cases a
- slice object) for a label
- - ``slice_locs``: returns the "range" to slice between two labels
- - ``get_indexer``: Computes the indexing vector for reindexing / data
- alignment purposes. See the source / docstrings for more on this
- - ``get_indexer_non_unique``: Computes the indexing vector for reindexing / data
- alignment purposes when the index is non-unique. See the source / docstrings
- for more on this
- - ``reindex``: Does any pre-conversion of the input index then calls
- ``get_indexer``
- - ``union``, ``intersection``: computes the union or intersection of two
- Index objects
- - ``insert``: Inserts a new label into an Index, yielding a new object
- - ``delete``: Delete a label, yielding a new object
- - ``drop``: Deletes a set of labels
- - ``take``: Analogous to ndarray.take
diff --git a/doc/source/internals.rst b/doc/source/internals.rst
new file mode 100644
index 0000000000000..e5d2b001c18f8
--- /dev/null
+++ b/doc/source/internals.rst
@@ -0,0 +1,96 @@
+.. _internals:
+
+.. currentmodule:: pandas
+
+.. ipython:: python
+ :suppress:
+
+ import numpy as np
+ import random
+ np.random.seed(123456)
+ from pandas import *
+ options.display.max_rows=15
+ import pandas as pd
+ randn = np.random.randn
+ randint = np.random.randint
+ np.set_printoptions(precision=4, suppress=True)
+ from pandas.compat import range, zip
+
+*********
+Internals
+*********
+
+This section will provide a look into some of pandas internals.
+
+Indexing
+--------
+
+In pandas there are a few objects implemented which can serve as valid
+containers for the axis labels:
+
+- ``Index``: the generic "ordered set" object, an ndarray of object dtype
+ assuming nothing about its contents. The labels must be hashable (and
+ likely immutable) and unique. Populates a dict of label to location in
+ Cython to do ``O(1)`` lookups.
+- ``Int64Index``: a version of ``Index`` highly optimized for 64-bit integer
+ data, such as time stamps
+- ``Float64Index``: a version of ``Index`` highly optimized for 64-bit float data
+- ``MultiIndex``: the standard hierarchical index object
+- ``DatetimeIndex``: An Index object with Timestamp elements
+- ``PeriodIndex``: An Index object with Period elements
+
+These are range generates to make the creation of a regular index easy:
+
+- ``date_range``: fixed frequency date range generated from a time rule or
+ DateOffset. An ndarray of Python datetime objects
+- ``period_range``: fixed frequency date range generated from a time rule or
+ DateOffset. An ndarray of ``Period`` objects, representing Timespans
+
+The motivation for having an ``Index`` class in the first place was to enable
+different implementations of indexing. This means that it's possible for you,
+the user, to implement a custom ``Index`` subclass that may be better suited to
+a particular application than the ones provided in pandas.
+
+From an internal implementation point of view, the relevant methods that an
+``Index`` must define are one or more of the following (depending on how
+incompatible the new object internals are with the ``Index`` functions):
+
+- ``get_loc``: returns an "indexer" (an integer, or in some cases a
+ slice object) for a label
+- ``slice_locs``: returns the "range" to slice between two labels
+- ``get_indexer``: Computes the indexing vector for reindexing / data
+ alignment purposes. See the source / docstrings for more on this
+- ``get_indexer_non_unique``: Computes the indexing vector for reindexing / data
+ alignment purposes when the index is non-unique. See the source / docstrings
+ for more on this
+- ``reindex``: Does any pre-conversion of the input index then calls
+ ``get_indexer``
+- ``union``, ``intersection``: computes the union or intersection of two
+ Index objects
+- ``insert``: Inserts a new label into an Index, yielding a new object
+- ``delete``: Delete a label, yielding a new object
+- ``drop``: Deletes a set of labels
+- ``take``: Analogous to ndarray.take
+
+MultiIndex
+~~~~~~~~~~
+
+Internally, the ``MultiIndex`` consists of a few things: the **levels**, the
+integer **labels**, and the level **names**:
+
+.. ipython:: python
+
+ index = MultiIndex.from_product([range(3), ['one', 'two']], names=['first', 'second'])
+ index
+ index.levels
+ index.labels
+ index.names
+
+You can probably guess that the labels determine which unique element is
+identified with that location at each layer of the index. It's important to
+note that sortedness is determined **solely** from the integer labels and does
+not check (or care) whether the levels themselves are sorted. Fortunately, the
+constructors ``from_tuples`` and ``from_arrays`` ensure that this is true, but
+if you compute the levels and labels yourself, please be careful.
+
+
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index 55bbf613b33cf..922fb84c57a56 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -90,7 +90,7 @@ this using the ``keys`` argument:
concatenated
As you can see (if you've read the rest of the documentation), the resulting
-object's index has a :ref:`hierarchical index <indexing.hierarchical>`. This
+object's index has a :ref:`hierarchical index <advanced.hierarchical>`. This
means that we can now do stuff like select out each chunk by key:
.. ipython:: python
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 60342f1b6cba5..ddbfc60a5dfe7 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -77,7 +77,7 @@ this form, use the ``pivot`` function:
If the ``values`` argument is omitted, and the input DataFrame has more than
one column of values which are not used as column or index inputs to ``pivot``,
then the resulting "pivoted" DataFrame will have :ref:`hierarchical columns
-<indexing.hierarchical>` whose topmost level indicates the respective value
+<advanced.hierarchical>` whose topmost level indicates the respective value
column:
.. ipython:: python
@@ -103,7 +103,7 @@ Reshaping by stacking and unstacking
Closely related to the ``pivot`` function are the related ``stack`` and
``unstack`` functions currently available on Series and DataFrame. These
functions are designed to work together with ``MultiIndex`` objects (see the
-section on :ref:`hierarchical indexing <indexing.hierarchical>`). Here are
+section on :ref:`hierarchical indexing <advanced.hierarchical>`). Here are
essentially what these functions do:
- ``stack``: "pivot" a level of the (possibly hierarchical) column labels,
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index 3a56794151b1e..befdf848ad23b 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -50,8 +50,7 @@ three types of multi-axis indexing.
is interpreted as position based or label based, it's usually better to be
explicit and use ``.iloc`` or ``.loc``.
- See more at :ref:`Advanced Indexing <indexing.advanced>`, :ref:`Advanced Hierarchical <indexing.advanced_hierarchical>` and
- :ref:`Fallback Indexing <indexing.fallback>`
+ See more at :ref:`Advanced Indexing <advanced>` and :ref:`Advanced Hierarchical <advanced.advanced_hierarchical>`.
Selection Deprecations
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt
index 96ab3d1e58d5c..e2f96f204edab 100644
--- a/doc/source/v0.14.0.txt
+++ b/doc/source/v0.14.0.txt
@@ -470,7 +470,7 @@ You can use ``slice(None)`` to select all the contents of *that* level. You do n
As usual, **both sides** of the slicers are included as this is label indexing.
-See :ref:`the docs<indexing.mi_slicers>`
+See :ref:`the docs<advanced.mi_slicers>`
See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :issue:`5641`, :issue:`7106`)
.. warning::
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index dd71ef1f63d54..5d514d71b30a5 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -20,6 +20,8 @@ users upgrade to this version.
- New datetimelike properties accessor ``.dt`` for Series, see :ref:`Datetimelike Properties <whatsnew_0150.dt>`
- dropping support for ``PyTables`` less than version 3.0.0, and ``numexpr`` less than version 2.1 (:issue:`7990`)
- API change in using Indexes in set operations, see :ref:`here <whatsnew_0150.index_set_ops>`
+ - API change in using Indexs set operations, see :ref:`here <whatsnew_0150.index_set_ops>`
+ - Split indexing documentation into :ref:`Indexing and Selecing Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>`
- :ref:`Other Enhancements <whatsnew_0150.enhancements>`
diff --git a/doc/source/v0.4.x.txt b/doc/source/v0.4.x.txt
index 5333bb9ffb157..4717b46a6bca8 100644
--- a/doc/source/v0.4.x.txt
+++ b/doc/source/v0.4.x.txt
@@ -13,7 +13,7 @@ New Features
Series (:issue:`209`, :issue:`203`)
- :ref:`Added <basics.align>` ``Series.align`` method for aligning two series
with choice of join method (ENH56_)
-- :ref:`Added <indexing.get_level_values>` method ``get_level_values`` to
+- :ref:`Added <advanced.get_level_values>` method ``get_level_values`` to
``MultiIndex`` (:issue:`188`)
- Set values in mixed-type ``DataFrame`` objects via ``.ix`` indexing attribute (:issue:`135`)
- Added new ``DataFrame`` :ref:`methods <basics.dtypes>`
@@ -28,7 +28,7 @@ New Features
- ``DataFrame.rename`` has a new ``copy`` parameter to :ref:`rename
<basics.rename>` a DataFrame in place (ENHed_)
- :ref:`Enable <reshaping.unstack_by_name>` unstacking by name (:issue:`142`)
-- :ref:`Enable <indexing.sortlevel_byname>` ``sortlevel`` to work by level (:issue:`141`)
+- :ref:`Enable <advanced.sortlevel_byname>` ``sortlevel`` to work by level (:issue:`141`)
Performance Enhancements
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.5.0.txt b/doc/source/v0.5.0.txt
index d0550fd5ef8f3..8b7e4721d136f 100644
--- a/doc/source/v0.5.0.txt
+++ b/doc/source/v0.5.0.txt
@@ -21,7 +21,7 @@ New Features
- :ref:`Added<reshaping.pivot>` ``pivot_table`` convenience function to pandas namespace (:issue:`234`)
- :ref:`Implemented <basics.rename_axis>` ``Panel.rename_axis`` function (:issue:`243`)
- DataFrame will show index level names in console output (:issue:`334`)
-- :ref:`Implemented <indexing.take>` ``Panel.take``
+- :ref:`Implemented <advanced.take>` ``Panel.take``
- :ref:`Added<basics.console_output>` ``set_eng_float_format`` for alternate DataFrame floating point string formatting (ENH61_)
- :ref:`Added <indexing.set_index>` convenience ``set_index`` function for creating a DataFrame index from its existing columns
- :ref:`Implemented <groupby.multiindex>` ``groupby`` hierarchical index level name (:issue:`223`)
diff --git a/doc/source/v0.6.1.txt b/doc/source/v0.6.1.txt
index 7e593d07f7f2b..a2dab738546f9 100644
--- a/doc/source/v0.6.1.txt
+++ b/doc/source/v0.6.1.txt
@@ -32,7 +32,7 @@ New features
- Add ``Series.from_csv`` function (:issue:`482`)
- :ref:`Can pass <stats.moments.binary>` DataFrame/DataFrame and
DataFrame/Series to rolling_corr/rolling_cov (GH #462)
-- MultiIndex.get_level_values can :ref:`accept the level name <indexing.get_level_values>`
+- MultiIndex.get_level_values can :ref:`accept the level name <advanced.get_level_values>`
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.7.0.txt b/doc/source/v0.7.0.txt
index bf7acd3820db0..cfba2ad3d05b6 100644
--- a/doc/source/v0.7.0.txt
+++ b/doc/source/v0.7.0.txt
@@ -33,7 +33,7 @@ New features
df = DataFrame(randn(10, 4))
df.apply(lambda x: x.describe())
-- :ref:`Add<indexing.reorderlevels>` ``reorder_levels`` method to Series and
+- :ref:`Add<advanced.reorderlevels>` ``reorder_levels`` method to Series and
DataFrame (:issue:`534`)
- :ref:`Add<indexing.dictionarylike>` dict-like ``get`` function to DataFrame
@@ -50,7 +50,7 @@ New features
- :ref:`Add <basics.stats>` ``level`` option to binary arithmetic functions on
``DataFrame`` and ``Series``
-- :ref:`Add <indexing.advanced_reindex>` ``level`` option to the ``reindex``
+- :ref:`Add <advanced.advanced_reindex>` ``level`` option to the ``reindex``
and ``align`` methods on Series and DataFrame for broadcasting values across
a level (:issue:`542`, :issue:`552`, others)
@@ -103,7 +103,7 @@ New features
- :ref:`Added <indexing.set_ops>` ``isin`` method to index objects
-- :ref:`Added <indexing.xs>` ``level`` argument to ``xs`` method of DataFrame.
+- :ref:`Added <advanced.xs>` ``level`` argument to ``xs`` method of DataFrame.
API Changes to integer indexing
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3a75f145587c0..dc89bdd8c9130 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -946,7 +946,7 @@ def to_sql(self, name, con, flavor='sqlite', schema=None, if_exists='fail',
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
- If not None, then rows will be written in batches of this size at a
+ If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
"""
@@ -1383,7 +1383,7 @@ def xs(self, key, axis=0, level=None, copy=None, drop_level=True):
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
- it is a superset of xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>`
+ it is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 03de19afe0580..95d279add172c 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -711,7 +711,7 @@ def major_xs(self, key, copy=None):
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
- it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>`
+ it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
@@ -741,7 +741,7 @@ def minor_xs(self, key, copy=None):
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
- it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>`
+ it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
@@ -771,7 +771,7 @@ def xs(self, key, axis=1, copy=None):
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
- it is a superset of xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>`
+ it is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
| DOC: created advanced indexing section
DOC: add internals section (GH4082)
Mostly just splitting up the indexing docs, but I reorded / revised the Multiindex sections
a bit to avoid duplication / make more clear.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8204 | 2014-09-07T15:43:08Z | 2014-09-12T13:45:03Z | 2014-09-12T13:45:03Z | 2014-09-12T13:45:03Z |
BUG: Fix mismatched logic error in compatibility check | diff --git a/pandas/compat/openpyxl_compat.py b/pandas/compat/openpyxl_compat.py
index 25ba83d58aaed..d0c2a807e14db 100644
--- a/pandas/compat/openpyxl_compat.py
+++ b/pandas/compat/openpyxl_compat.py
@@ -21,4 +21,4 @@ def is_compat():
"""
import openpyxl
ver = LooseVersion(openpyxl.__version__)
- return LooseVersion(start_ver) < ver <= LooseVersion(stop_ver)
+ return LooseVersion(start_ver) <= ver < LooseVersion(stop_ver)
| We want to support below 2.0 (non-inclusive), but including 1.6.1. Check was
doing the opposite. Not worth merging if we accept #7565 , but just wanted this
here as a ping if we don't so we're internally consistent.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8203 | 2014-09-07T10:01:23Z | 2014-09-07T12:52:01Z | 2014-09-07T12:52:01Z | 2014-09-07T12:52:01Z |
ERR: pass original error message in read_sql_query (GH7730) | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b72c41e45c9ca..037693337a598 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1104,7 +1104,7 @@ def execute(self, *args, **kwargs):
"Execution failed on sql: %s\n%s\nunable to rollback" % (args[0], e))
raise_with_traceback(ex)
- ex = DatabaseError("Execution failed on sql: %s" % args[0])
+ ex = DatabaseError("Execution failed on sql '%s': %s" % (args[0], e))
raise_with_traceback(ex)
def read_sql(self, sql, index_col=None, coerce_float=True, params=None,
| Closes #7730
| https://api.github.com/repos/pandas-dev/pandas/pulls/8201 | 2014-09-06T23:14:30Z | 2014-09-09T07:51:12Z | 2014-09-09T07:51:12Z | 2014-09-09T07:51:16Z |
BUG: fix variable overwriting in radviz | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 6db3fcaa832c0..92eeb4b76df8b 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -920,3 +920,4 @@ Bug Fixes
- Bug in plotting methods modifying the global matplotlib rcParams (:issue:`8242`).
- Bug in ``DataFrame.__setitem__`` that caused errors when setting a dataframe column to a sparse array (:issue:`8131`)
- Bug where ``Dataframe.boxplot()`` failed when entire column was empty (:issue:`8181`).
+- Bug with messed variables in ``radviz`` visualization (:issue:`8199`).
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 0c150074a9298..328e392c3d8b7 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -405,10 +405,10 @@ def normalize(series):
for kls in classes:
to_plot[kls] = [[], []]
- n = len(frame.columns) - 1
+ m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
- for t in [2.0 * np.pi * (i / float(n))
- for i in range(n)]])
+ for t in [2.0 * np.pi * (i / float(m))
+ for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
| The variable `n` is set to number of rows in dataframe, and then it overwritten with number of columns, but after that there is loop that should iterate over rows and it uses new value in `n` that holds number of columns already.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8199 | 2014-09-06T20:43:30Z | 2014-09-18T12:30:01Z | 2014-09-18T12:30:01Z | 2014-10-02T13:42:16Z |
API: kdeplot fails with NaNs. | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 5a65d4305ec55..71f39d9621bee 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -667,3 +667,5 @@ Bug Fixes
- Bug in accessing groups from a ``GroupBy`` when the original grouper
was a tuple (:issue:`8121`).
+
+- Bug with kde plot and NaNs (:issue:`8182`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 7694b1b087d10..131edf499ff18 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -745,6 +745,14 @@ def test_kde_kwargs(self):
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
+ @slow
+ def test_kde_missing_vals(self):
+ tm._skip_if_no_scipy()
+ _skip_if_no_scipy_gaussian_kde()
+ s = Series(np.random.uniform(size=50))
+ s[0] = np.nan
+ ax = _check_plot_works(s.plot, kind='kde')
+
@slow
def test_hist_kwargs(self):
ax = self.ts.plot(kind='hist', bins=5)
@@ -1876,6 +1884,14 @@ def test_kde_df(self):
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
+ @slow
+ def test_kde_missing_vals(self):
+ tm._skip_if_no_scipy()
+ _skip_if_no_scipy_gaussian_kde()
+ df = DataFrame(np.random.uniform(size=(100, 4)))
+ df.loc[0, 0] = np.nan
+ ax = _check_plot_works(df.plot, kind='kde')
+
@slow
def test_hist_df(self):
df = DataFrame(randn(100, 4))
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 50f3ab23babad..56316ac726c8a 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1954,6 +1954,7 @@ def _get_plot_function(self):
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
+ y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
| I think the most sensible thing to do here is to drop NaN's, like histograms do, rather than filling 0's. The table in #8177 should be updated to reflect that NaN's are dropped, can do that myself after 8177 gets merged.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8196 | 2014-09-06T01:09:43Z | 2014-09-06T13:40:59Z | 2014-09-06T13:40:59Z | 2014-09-06T13:41:22Z |
ENH: more verbosity to PyTables import failures | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 07e9abeaadbb4..965b87d8044ed 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -398,8 +398,8 @@ def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
try:
import tables
- except ImportError: # pragma: no cover
- raise ImportError('HDFStore requires PyTables')
+ except ImportError as ex: # pragma: no cover
+ raise ImportError('HDFStore requires PyTables, "{ex}" problem importing'.format(ex=str(ex)))
self._path = path
if mode is None:
| https://api.github.com/repos/pandas-dev/pandas/pulls/8195 | 2014-09-06T00:05:45Z | 2014-09-08T20:45:59Z | 2014-09-08T20:45:59Z | 2014-09-08T20:46:09Z | |
ENH: create Timedelta scalar / TimedeltaIndex | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 017739adbc8b1..242ce9865dc9a 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -146,8 +146,8 @@ Top-level missing data
isnull
notnull
-Top-level dealing with datetimes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Top-level dealing with datetimelike
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
@@ -157,6 +157,7 @@ Top-level dealing with datetimes
date_range
bdate_range
period_range
+ timedelta_range
Top-level evaluation
~~~~~~~~~~~~~~~~~~~~
@@ -440,13 +441,16 @@ Time series-related
Datetimelike Properties
~~~~~~~~~~~~~~~~~~~~~~~
+
``Series.dt`` can be used to access the values of the series as
datetimelike and return several properties.
Due to implementation details the methods show up here as methods of the
-``DatetimeProperties/PeriodProperties`` classes. These can be accessed like ``Series.dt.<property>``.
+``DatetimeProperties/PeriodProperties/TimedeltaProperties`` classes. These can be accessed like ``Series.dt.<property>``.
.. currentmodule:: pandas.tseries.common
+**Datetime Properties**
+
.. autosummary::
:toctree: generated/
@@ -473,6 +477,37 @@ Due to implementation details the methods show up here as methods of the
DatetimeProperties.is_year_start
DatetimeProperties.is_year_end
+**Datetime Methods**
+
+.. autosummary::
+ :toctree: generated/
+
+ DatetimeProperties.to_period
+ DatetimeProperties.to_pydatetime
+ DatetimeProperties.tz_localize
+ DatetimeProperties.tz_convert
+
+**Timedelta Properties**
+
+.. autosummary::
+ :toctree: generated/
+
+ TimedeltaProperties.days
+ TimedeltaProperties.hours
+ TimedeltaProperties.minutes
+ TimedeltaProperties.seconds
+ TimedeltaProperties.milliseconds
+ TimedeltaProperties.microseconds
+ TimedeltaProperties.nanoseconds
+ TimedeltaProperties.components
+
+**Timedelta Methods**
+
+.. autosummary::
+ :toctree: generated/
+
+ TimedeltaProperties.to_pytimedelta
+
String handling
~~~~~~~~~~~~~~~
``Series.str`` can be used to access the values of the series as
@@ -1289,6 +1324,37 @@ Conversion
DatetimeIndex.to_pydatetime
DatetimeIndex.to_series
+TimedeltaIndex
+--------------
+
+.. autosummary::
+ :toctree: generated/
+
+ TimedeltaIndex
+
+Components
+~~~~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ TimedeltaIndex.days
+ TimedeltaIndex.hours
+ TimedeltaIndex.minutes
+ TimedeltaIndex.seconds
+ TimedeltaIndex.milliseconds
+ TimedeltaIndex.microseconds
+ TimedeltaIndex.nanoseconds
+ TimedeltaIndex.components
+
+Conversion
+~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ TimedeltaIndex.to_pytimedelta
+ TimedeltaIndex.to_series
+
GroupBy
-------
.. currentmodule:: pandas.core.groupby
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 884976b55d6d1..985cd22c03382 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1122,6 +1122,16 @@ This enables nice expressions like this:
s[s.dt.day==2]
+You can easily produce tz-aware transformations:
+
+.. ipython:: python
+
+ stz = s.dt.tz_localize('US/Eastern')
+ stz
+ stz.dt.tz
+
+The ``.dt`` accessor works for period and timedelta dtypes.
+
.. ipython:: python
# period
@@ -1130,6 +1140,15 @@ This enables nice expressions like this:
s.dt.year
s.dt.day
+.. ipython:: python
+
+ # timedelta
+ s = Series(timedelta_range('1 day 00:00:05',periods=4,freq='s'))
+ s
+ s.dt.days
+ s.dt.seconds
+ s.dt.components
+
.. note::
``Series.dt`` will raise a ``TypeError`` if you access with a non-datetimelike values
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 243d1c02d1a65..a293e0a57fc0f 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -636,7 +636,7 @@ Computation
Miscellaneous
-------------
-The :ref:`Timedeltas <timeseries.timedeltas>` docs.
+The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
`Operating with timedeltas
<http://github.com/pydata/pandas/pull/2899>`__
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index a845e31d95e90..1b692a317051d 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -131,6 +131,7 @@ See the package overview for more detail about what's in the library.
merging
reshaping
timeseries
+ timedeltas
categorical
visualization
rplot
diff --git a/doc/source/internals.rst b/doc/source/internals.rst
index e5d2b001c18f8..9418ca5265f1a 100644
--- a/doc/source/internals.rst
+++ b/doc/source/internals.rst
@@ -36,7 +36,8 @@ containers for the axis labels:
data, such as time stamps
- ``Float64Index``: a version of ``Index`` highly optimized for 64-bit float data
- ``MultiIndex``: the standard hierarchical index object
-- ``DatetimeIndex``: An Index object with Timestamp elements
+- ``DatetimeIndex``: An Index object with ``Timestamp`` boxed elements (impl are the int64 values)
+- ``TimedeltaIndex``: An Index object with ``Timedelta`` boxed elements (impl are the int64 values)
- ``PeriodIndex``: An Index object with Period elements
These are range generates to make the creation of a regular index easy:
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
new file mode 100644
index 0000000000000..b847f02b40594
--- /dev/null
+++ b/doc/source/timedeltas.rst
@@ -0,0 +1,364 @@
+.. currentmodule:: pandas
+.. _timedeltas:
+
+.. ipython:: python
+ :suppress:
+
+ from datetime import datetime, timedelta
+ import numpy as np
+ np.random.seed(123456)
+ from pandas import *
+ randn = np.random.randn
+ randint = np.random.randint
+ np.set_printoptions(precision=4, suppress=True)
+ options.display.max_rows=15
+ import dateutil
+ import pytz
+ from dateutil.relativedelta import relativedelta
+ from pandas.tseries.api import *
+ from pandas.tseries.offsets import *
+
+.. _timedeltas.timedeltas:
+
+***********
+Time Deltas
+***********
+
+.. note::
+
+ Starting in v0.15.0, we introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
+ but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes.
+
+Timedeltas are differences in times, expressed in difference units, e.g. days,hours,minutes,seconds.
+They can be both positive and negative.
+
+Parsing
+-------
+
+You can construct a ``Timedelta`` scalar thru various arguments:
+
+.. ipython:: python
+
+ # strings
+ Timedelta('1 days')
+ Timedelta('1 days 00:00:00')
+ Timedelta('1 days 2 hours')
+ Timedelta('-1 days 2 min 3us')
+
+ # like datetime.timedelta
+ # note: these MUST be specified as keyword arguments
+ Timedelta(days=1,seconds=1)
+
+ # integers with a unit
+ Timedelta(1,unit='d')
+
+ # from a timedelta/np.timedelta64
+ Timedelta(timedelta(days=1,seconds=1))
+ Timedelta(np.timedelta64(1,'ms'))
+
+ # a NaT
+ Timedelta('nan')
+ Timedelta('nat')
+
+:ref:`DateOffsets<timeseries.offsets>` (``Day, Hour, Minute, Second, Milli, Micro, Nano``) can also be used in construction.
+
+.. ipython:: python
+
+ Timedelta(Second(2))
+
+Further, operations among the scalars yield another scalar ``Timedelta``
+
+.. ipython:: python
+
+ Timedelta(Day(2)) + Timedelta(Second(2)) + Timedelta('00:00:00.000123')
+
+to_timedelta
+~~~~~~~~~~~~
+
+.. warning::
+
+ Prior to 0.15.0 ``to_timedelta`` would return a ``Series`` for list-like/Series input, and a ``np.timedelta64`` for scalar input.
+ It will now return a ``TimedeltaIndex`` for list-like input, ``Series`` for Series input, and ``Timedelta`` for scalar input.
+
+ The arguments to ``pd.to_timedelta`` are now ``(arg,unit='ns',box=True)``, previously were ``(arg,unit='ns',box=True)`` as these are more logical.
+
+Using the top-level ``pd.to_timedelta``, you can convert a scalar, array, list, or Series from a recognized timedelta format / value into a ``Timedelta`` type.
+It will construct Series if the input is a Series, a scalar if the input is scalar-like, otherwise will output a ``TimedeltaIndex``
+
+.. ipython:: python
+
+ to_timedelta('1 days 06:05:01.00003')
+ to_timedelta('15.5us')
+ to_timedelta(['1 days 06:05:01.00003','15.5us','nan'])
+ to_timedelta(np.arange(5),unit='s')
+ to_timedelta(np.arange(5),unit='d')
+
+Operations
+----------
+
+You can operate on Series/DataFrames and construct ``timedelta64[ns]`` Series thru
+subtraction operations on ``datetime64[ns]`` Series, or ``Timestamps``.
+
+.. ipython:: python
+
+ s = Series(date_range('2012-1-1', periods=3, freq='D'))
+ td = Series([ Timedelta(days=i) for i in range(3) ])
+ df = DataFrame(dict(A = s, B = td))
+ df
+ df['C'] = df['A'] + df['B']
+ df
+ df.dtypes
+
+ s - s.max()
+ s - datetime(2011,1,1,3,5)
+ s + timedelta(minutes=5)
+ s + Minute(5)
+ s + Minute(5) + Milli(5)
+
+Operations with scalars from a ``timedelta64[ns]`` series
+
+.. ipython:: python
+
+ y = s - s[0]
+ y
+
+Series of timedeltas with ``NaT`` values are supported
+
+.. ipython:: python
+
+ y = s - s.shift()
+ y
+
+Elements can be set to ``NaT`` using ``np.nan`` analogously to datetimes
+
+.. ipython:: python
+
+ y[1] = np.nan
+ y
+
+Operands can also appear in a reversed order (a singular object operated with a Series)
+
+.. ipython:: python
+
+ s.max() - s
+ datetime(2011,1,1,3,5) - s
+ timedelta(minutes=5) + s
+
+``min, max`` and the corresponding ``idxmin, idxmax`` operations are supported on frames
+
+.. ipython:: python
+
+ A = s - Timestamp('20120101') - Timedelta('00:05:05')
+ B = s - Series(date_range('2012-1-2', periods=3, freq='D'))
+
+ df = DataFrame(dict(A=A, B=B))
+ df
+
+ df.min()
+ df.min(axis=1)
+
+ df.idxmin()
+ df.idxmax()
+
+``min, max, idxmin, idxmax`` operations are supported on Series / DataFrames. A single result will be a ``Timedelta``.
+
+.. ipython:: python
+
+ df.min().max()
+ df.min(axis=1).min()
+
+ df.min().idxmax()
+ df.min(axis=1).idxmin()
+
+You can fillna on timedeltas. Integers will be interpreted as seconds. You can
+pass a timedelta to get a particular value.
+
+.. ipython:: python
+
+ y.fillna(0)
+ y.fillna(10)
+ y.fillna(Timedelta('-1 days, 00:00:05'))
+
+You can also negate, multiply and use ``abs`` on ``Timedeltas``
+
+.. ipython:: python
+
+ td1 = Timedelta('-1 days 2 hours 3 seconds')
+ -1 * td1
+ - td1
+ abs(td1)
+
+.. _timedeltas.timedeltas_reductions:
+
+Reductions
+----------
+
+Numeric reduction operation for ``timedelta64[ns]`` will return ``Timedelta`` objects.
+
+.. ipython:: python
+
+ y2 = y.fillna(timedelta(days=-1,seconds=5))
+ y2
+ y2.mean()
+ y2.quantile(.1)
+
+.. _timedeltas.timedeltas_convert:
+
+Frequency Conversion
+--------------------
+
+.. versionadded:: 0.13
+
+Timedelta Series, ``TimedeltaIndex``, and ``Timedelta`` scalars can be converted to other 'frequencies' by dividing by another timedelta,
+or by astyping to a specific timedelta type. These operations yield Series and propagate ``NaT`` -> ``nan``.
+Note that division by the numpy scalar is true division, while astyping is equivalent of floor division.
+
+.. ipython:: python
+
+ td = Series(date_range('20130101',periods=4)) - \
+ Series(date_range('20121201',periods=4))
+ td[2] += timedelta(minutes=5,seconds=3)
+ td[3] = np.nan
+ td
+
+ # to days
+ td / np.timedelta64(1,'D')
+ td.astype('timedelta64[D]')
+
+ # to seconds
+ td / np.timedelta64(1,'s')
+ td.astype('timedelta64[s]')
+
+ # to months (these are constant months)
+ td / np.timedelta64(1,'M')
+
+Dividing or multiplying a ``timedelta64[ns]`` Series by an integer or integer Series
+yields another ``timedelta64[ns]`` dtypes Series.
+
+.. ipython:: python
+
+ td * -1
+ td * Series([1,2,3,4])
+
+Attributes
+----------
+
+You can access various components of the ``Timedelta`` or ``TimedeltaIndex`` directly using the attributes ``days,hours,minutes,seconds,milliseconds,microseconds,nanoseconds``.
+These operations can be directly accessed via the ``.dt`` property of the ``Series`` as well. These return an integer representing that interval (which is signed according to whether the ``Timedelta`` is signed).
+
+For a ``Series``
+
+.. ipython:: python
+
+ td.dt.days
+ td.dt.seconds
+
+You can access the component field for a scalar ``Timedelta`` directly.
+
+.. ipython:: python
+
+ tds = Timedelta('31 days 5 min 3 sec')
+ tds.days
+ tds.seconds
+ (-tds).seconds
+
+You can use the ``.components`` property to access a reduced form of the timedelta. This returns a ``DataFrame`` indexed
+similarly to the ``Series``
+
+.. ipython:: python
+
+ td.dt.components
+
+.. _timedeltas.attribues_warn:
+
+.. warning::
+
+ ``Timedelta`` scalars (and ``TimedeltaIndex``) component fields are *not the same* as the component fields on a ``datetime.timedelta`` object. For example, ``.seconds`` on a ``datetime.timedelta`` object returns the total number of seconds combined between ``hours``, ``minutes`` and ``seconds``. In contrast, the pandas ``Timedelta`` breaks out hours, minutes, microseconds and nanoseconds separately.
+
+ .. ipython:: python
+
+ # Timedelta accessor
+ tds = Timedelta('31 days 5 min 3 sec')
+ tds.minutes
+ tds.seconds
+
+ # datetime.timedelta accessor
+ # this is 5 minutes * 60 + 3 seconds
+ tds.to_timedelta().seconds
+
+
+TimedeltaIndex
+--------------
+
+.. versionadded:: 0.15.0
+
+To generate an index with time delta, you can use either the TimedeltaIndex or
+the ``timedelta_range`` constructor.
+
+Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``,
+or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent missing values.
+
+.. ipython:: python
+
+ TimedeltaIndex(['1 days','1 days, 00:00:05',
+ np.timedelta64(2,'D'),timedelta(days=2,seconds=2)])
+
+Similarly to ``date_range``, you can construct regular ranges of a ``TimedeltaIndex``:
+
+.. ipython:: python
+
+ timedelta_range(start='1 days',periods=5,freq='D')
+ timedelta_range(start='1 days',end='2 days',freq='30T')
+
+Using the TimedeltaIndex
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Similarly to other of the datetime-like indices, ``DatetimeIndex`` and ``PeriodIndex``, you can use
+``TimedeltaIndex`` as the index of pandas objects.
+
+.. ipython:: python
+
+ s = Series(np.arange(100),
+ index=timedelta_range('1 days',periods=100,freq='h'))
+ s
+
+Selections work similarly, with coercion on string-likes and slices:
+
+.. ipython:: python
+
+ s['1 day':'2 day']
+ s['1 day 01:00:00']
+ s[Timedelta('1 day 1h')]
+
+Furthermore you can use partial string selection and the range will be inferred:
+
+.. ipython:: python
+
+ s['1 day':'1 day 5 hours']
+
+Finally, the combination of ``TimedeltaIndex`` with ``DatetimeIndex`` allow certain combination operations that are NaT preserving:
+
+.. ipython:: python
+
+ tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'])
+ tdi.tolist()
+ dti = date_range('20130101',periods=3)
+ dti.tolist()
+ (dti + tdi).tolist()
+ (dti - tdi).tolist()
+
+Similarly to frequency conversion on a ``Series`` above, you can convert these indices to yield another Index.
+
+.. ipython:: python
+
+ tdi / np.timedelta64(1,'s')
+ tdi.astype('timedelta64[s]')
+
+Scalars type ops work as well
+
+.. ipython:: python
+
+ tdi + Timestamp('20130101')
+ tdi + Timedelta('10 days')
+ (Timestamp('20130101') - tdi).tolist()
+ tdi / tdi[0]
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index a23d067cefa4f..963dcde0f1a1f 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1491,8 +1491,8 @@ TimeSeries, aligning the data on the UTC timestamps:
result
result.index
-To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or ``tz_convert(None)``.
-``tz_localize(None)`` will remove timezone holding local time representations.
+To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or ``tz_convert(None)``.
+``tz_localize(None)`` will remove timezone holding local time representations.
``tz_convert(None)`` will remove timezone after converting to UTC time.
.. ipython:: python
@@ -1511,7 +1511,7 @@ Ambiguous Times when Localizing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In some cases, localize cannot determine the DST and non-DST hours when there are
-duplicates. This often happens when reading files or database records that simply
+duplicates. This often happens when reading files or database records that simply
duplicate the hours. Passing ``ambiguous='infer'`` (``infer_dst`` argument in prior
releases) into ``tz_localize`` will attempt to determine the right offset.
@@ -1526,186 +1526,23 @@ releases) into ``tz_localize`` will attempt to determine the right offset.
rng_hourly_eastern.values
In addition to 'infer', there are several other arguments supported. Passing
-an array-like of bools or 0s/1s where True represents a DST hour and False a
-non-DST hour, allows for distinguishing more than one DST
-transition (e.g., if you have multiple records in a database each with their
+an array-like of bools or 0s/1s where True represents a DST hour and False a
+non-DST hour, allows for distinguishing more than one DST
+transition (e.g., if you have multiple records in a database each with their
own DST transition). Or passing 'NaT' will fill in transition times
with not-a-time values. These methods are available in the ``DatetimeIndex``
constructor as well as ``tz_localize``.
.. ipython:: python
-
+
rng_hourly_dst = np.array([1, 1, 0, 0, 0])
rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).values
rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').values
+ didx = DatetimeIndex(start='2014-08-01 09:00', freq='H', periods=10, tz='US/Eastern')
+ didx
+ didx.tz_localize(None)
+ didx.tz_convert(None)
-.. _timeseries.timedeltas:
-
-Time Deltas
------------
-
-Timedeltas are differences in times, expressed in difference units, e.g. days,hours,minutes,seconds.
-They can be both positive and negative. :ref:`DateOffsets<timeseries.offsets>` that are absolute in nature
-(``Day, Hour, Minute, Second, Milli, Micro, Nano``) can be used as ``timedeltas``.
-
-.. ipython:: python
-
- from datetime import datetime, timedelta
- s = Series(date_range('2012-1-1', periods=3, freq='D'))
- td = Series([ timedelta(days=i) for i in range(3) ])
- df = DataFrame(dict(A = s, B = td))
- df
- df['C'] = df['A'] + df['B']
- df
- df.dtypes
-
- s - s.max()
- s - datetime(2011,1,1,3,5)
- s + timedelta(minutes=5)
- s + Minute(5)
- s + Minute(5) + Milli(5)
-
-Getting scalar results from a ``timedelta64[ns]`` series
-
-.. ipython:: python
-
- y = s - s[0]
- y
-
-Series of timedeltas with ``NaT`` values are supported
-
-.. ipython:: python
-
- y = s - s.shift()
- y
-
-Elements can be set to ``NaT`` using ``np.nan`` analogously to datetimes
-
-.. ipython:: python
-
- y[1] = np.nan
- y
-
-Operands can also appear in a reversed order (a singular object operated with a Series)
-
-.. ipython:: python
-
- s.max() - s
- datetime(2011,1,1,3,5) - s
- timedelta(minutes=5) + s
-
-Some timedelta numeric like operations are supported.
-
-.. ipython:: python
-
- td - timedelta(minutes=5, seconds=5, microseconds=5)
-
-``min, max`` and the corresponding ``idxmin, idxmax`` operations are supported on frames
-
-.. ipython:: python
-
- A = s - Timestamp('20120101') - timedelta(minutes=5, seconds=5)
- B = s - Series(date_range('2012-1-2', periods=3, freq='D'))
-
- df = DataFrame(dict(A=A, B=B))
- df
-
- df.min()
- df.min(axis=1)
-
- df.idxmin()
- df.idxmax()
-
-``min, max`` operations are supported on series; these return a single element
-``timedelta64[ns]`` Series (this avoids having to deal with numpy timedelta64
-issues). ``idxmin, idxmax`` are supported as well.
-
-.. ipython:: python
-
- df.min().max()
- df.min(axis=1).min()
-
- df.min().idxmax()
- df.min(axis=1).idxmin()
-
-You can fillna on timedeltas. Integers will be interpreted as seconds. You can
-pass a timedelta to get a particular value.
-
-.. ipython:: python
-
- y.fillna(0)
- y.fillna(10)
- y.fillna(timedelta(days=-1,seconds=5))
-
-.. _timeseries.timedeltas_reductions:
-
-Time Deltas & Reductions
-------------------------
-
-.. warning::
-
- A numeric reduction operation for ``timedelta64[ns]`` can return a single-element ``Series`` of
- dtype ``timedelta64[ns]``.
-
-You can do numeric reduction operations on timedeltas.
-
-.. ipython:: python
-
- y2 = y.fillna(timedelta(days=-1,seconds=5))
- y2
- y2.mean()
- y2.quantile(.1)
-
-.. _timeseries.timedeltas_convert:
-
-Time Deltas & Conversions
--------------------------
-
-.. versionadded:: 0.13
-
-**string/integer conversion**
-
-Using the top-level ``to_timedelta``, you can convert a scalar or array from the standard
-timedelta format (produced by ``to_csv``) into a timedelta type (``np.timedelta64`` in ``nanoseconds``).
-It can also construct Series.
-
-.. warning::
-
- This requires ``numpy >= 1.7``
-
-.. ipython:: python
-
- to_timedelta('1 days 06:05:01.00003')
- to_timedelta('15.5us')
- to_timedelta(['1 days 06:05:01.00003','15.5us','nan'])
- to_timedelta(np.arange(5),unit='s')
- to_timedelta(np.arange(5),unit='d')
-
-**frequency conversion**
-
-Timedeltas can be converted to other 'frequencies' by dividing by another timedelta,
-or by astyping to a specific timedelta type. These operations yield ``float64`` dtyped Series.
-
-.. ipython:: python
-
- td = Series(date_range('20130101',periods=4))-Series(date_range('20121201',periods=4))
- td[2] += np.timedelta64(timedelta(minutes=5,seconds=3))
- td[3] = np.nan
- td
-
- # to days
- td / np.timedelta64(1,'D')
- td.astype('timedelta64[D]')
-
- # to seconds
- td / np.timedelta64(1,'s')
- td.astype('timedelta64[s]')
-
-Dividing or multiplying a ``timedelta64[ns]`` Series by an integer or integer Series
-yields another ``timedelta64[ns]`` dtypes Series.
-
-.. ipython:: python
-
- td * -1
- td * Series([1,2,3,4])
+ # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None)
+ didx.tz_convert('UCT').tz_localize(None)
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index ac0a14f45b69e..78239eef1b98f 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -460,7 +460,7 @@ Enhancements
get_dummies([1, 2, np.nan], dummy_na=True)
-- ``timedelta64[ns]`` operations. See :ref:`the docs<timeseries.timedeltas_convert>`.
+- ``timedelta64[ns]`` operations. See :ref:`the docs<timedeltas.timedeltas_convert>`.
.. warning::
@@ -479,7 +479,7 @@ Enhancements
A Series of dtype ``timedelta64[ns]`` can now be divided by another
``timedelta64[ns]`` object, or astyped to yield a ``float64`` dtyped Series. This
- is frequency conversion. See :ref:`the docs<timeseries.timedeltas_convert>` for the docs.
+ is frequency conversion. See :ref:`the docs<timedeltas.timedeltas_convert>` for the docs.
.. ipython:: python
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 49c431d8071e8..5a4f3b7da4843 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -16,11 +16,11 @@ users upgrade to this version.
- Highlights include:
- The ``Categorical`` type was integrated as a first-class pandas type, see :ref:`here <whatsnew_0150.cat>`
+ - New scalar type ``Timedelta``, and a new index type ``TimedeltaIndex``, see :ref:`here <whatsnew_0150.timedeltaindex>`
- Internal refactoring of the ``Index`` class to no longer sub-class ``ndarray``, see :ref:`Internal Refactoring <whatsnew_0150.refactoring>`
- New datetimelike properties accessor ``.dt`` for Series, see :ref:`Datetimelike Properties <whatsnew_0150.dt>`
- dropping support for ``PyTables`` less than version 3.0.0, and ``numexpr`` less than version 2.1 (:issue:`7990`)
- API change in using Indexes in set operations, see :ref:`here <whatsnew_0150.index_set_ops>`
- - API change in using Indexs set operations, see :ref:`here <whatsnew_0150.index_set_ops>`
- Split indexing documentation into :ref:`Indexing and Selecing Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>`
- :ref:`Other Enhancements <whatsnew_0150.enhancements>`
@@ -57,7 +57,7 @@ API changes
.. ipython:: python
- idx = pandas.MultiIndex.from_product([['a'], range(3), list("pqr")], names=['foo', 'bar', 'baz'])
+ idx = MultiIndex.from_product([['a'], range(3), list("pqr")], names=['foo', 'bar', 'baz'])
idx.set_names('qux', level=0)
idx.set_names(['qux','baz'], level=[0,1])
idx.set_levels(['a','b','c'], level='bar')
@@ -384,6 +384,7 @@ This will return a Series, indexed like the existing Series. See the :ref:`docs
s.dt.hour
s.dt.second
s.dt.day
+ s.dt.freq
This enables nice expressions like this:
@@ -391,6 +392,16 @@ This enables nice expressions like this:
s[s.dt.day==2]
+You can easily produce tz aware transformations:
+
+.. ipython:: python
+
+ stz = s.dt.tz_localize('US/Eastern')
+ stz
+ stz.dt.tz
+
+The ``.dt`` accessor works for period and timedelta dtypes.
+
.. ipython:: python
# period
@@ -399,6 +410,15 @@ This enables nice expressions like this:
s.dt.year
s.dt.day
+.. ipython:: python
+
+ # timedelta
+ s = Series(timedelta_range('1 day 00:00:05',periods=4,freq='s'))
+ s
+ s.dt.days
+ s.dt.seconds
+ s.dt.components
+
.. _whatsnew_0150.refactoring:
Internal Refactoring
@@ -455,6 +475,108 @@ For full docs, see the :ref:`Categorical introduction <categorical>` and the
only. If you want to manipulate codes, please use one of the
:ref:`API methods on Categoricals <api.categorical>`.
+.. _whatsnew_0150.timedeltaindex:
+
+TimedeltaIndex/Scalar
+~~~~~~~~~~~~~~~~~~~~~
+
+We introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
+but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes.
+This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a nice-API box for the type. See the :ref:`docs <timedeltas.timedeltas>`.
+(:issue:`3009`, :issue:`4533`, :issue:`8209`, :issue:`8187`, :issue:`8190`, :issue:`7869`)
+
+.. warning::
+
+ ``Timedelta`` scalars (and ``TimedeltaIndex``) component fields are *not the same* as the component fields on a ``datetime.timedelta`` object. For example, ``.seconds`` on a ``datetime.timedelta`` object returns the total number of seconds combined between ``hours``, ``minutes`` and ``seconds``. In contrast, the pandas ``Timedelta`` breaks out hours, minutes, microseconds and nanoseconds separately.
+
+ .. ipython:: python
+
+ # Timedelta accessor
+ tds = Timedelta('31 days 5 min 3 sec')
+ tds.minutes
+ tds.seconds
+
+ # datetime.timedelta accessor
+ # this is 5 minutes * 60 + 3 seconds
+ tds.to_timedelta().seconds
+
+.. warning::
+
+ Prior to 0.15.0 ``to_timedelta`` would return a ``Series`` for list-like/Series input, and a ``np.timedelta64`` for scalar input.
+ It will now return a ``TimedeltaIndex`` for list-like input, ``Series`` for Series input, and ``Timedelta`` for scalar input.
+
+ The arguments to ``pd.to_timedelta`` are now ``(arg,unit='ns',box=True)``, previously were ``(arg,unit='ns',box=True)`` as these are more logical.
+
+Construct a scalar
+
+.. ipython:: python
+
+ Timedelta('1 days 06:05:01.00003')
+ Timedelta('15.5us')
+ Timedelta('1 hour 15.5us')
+
+ # a NaT
+ Timedelta('nan')
+
+Access fields for a Timedelta
+
+.. ipython:: python
+
+ td = Timedelta('1 hour 3m 15.5us')
+ td.hours
+ td.minutes
+ td.microseconds
+ td.nanoseconds
+
+Construct a ``TimedeltaIndex``
+
+.. ipython:: python
+ :suppress:
+
+ import datetime
+ from datetime import timedelta
+
+.. ipython:: python
+
+ TimedeltaIndex(['1 days','1 days, 00:00:05',
+ np.timedelta64(2,'D'),timedelta(days=2,seconds=2)])
+
+Constructing a ``TimedeltaIndex`` with a regular range
+
+.. ipython:: python
+
+ timedelta_range('1 days',periods=5,freq='D')
+ timedelta_range(start='1 days',end='2 days',freq='30T')
+
+You can now use a ``TimedeltaIndex`` as the index of a pandas object
+
+.. ipython:: python
+
+ s = Series(np.arange(5),
+ index=timedelta_range('1 days',periods=5,freq='s'))
+ s
+
+You can select with partial string selections
+
+.. ipython:: python
+
+ s['1 day 00:00:02']
+ s['1 day':'1 day 00:00:02']
+
+Finally, the combination of ``TimedeltaIndex`` with ``DatetimeIndex`` allow certain combination operations that are NaT preserving:
+
+.. ipython:: python
+
+ tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'])
+ tdi.tolist()
+ dti = date_range('20130101',periods=3)
+ dti.tolist()
+
+ (dti + tdi).tolist()
+ (dti - tdi).tolist()
+
+- iteration of a ``Series`` e.g. ``list(Series(...))`` of ``timedelta64[ns]`` would prior to v0.15.0 return ``np.timedelta64`` for each element. These will now be wrapped in ``Timedelta``.
+
.. _whatsnew_0150.prior_deprecations:
Prior Version Deprecations/Changes
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 9a1e61ad30386..25d6a7f293dac 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -180,7 +180,7 @@ def stringify(value):
v = time.mktime(v.timetuple())
return TermValue(v, pd.Timestamp(v), kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
- v = _coerce_scalar_to_timedelta_type(v, unit='s').item()
+ v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif kind == u('integer'):
v = int(float(v))
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ee9854f8dc5f9..8d1b1588552bf 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -124,7 +124,9 @@ def factorize(values, sort=False, order=None, na_sentinel=-1):
from pandas.core.index import Index
from pandas.core.series import Series
vals = np.asarray(values)
+
is_datetime = com.is_datetime64_dtype(vals)
+ is_timedelta = com.is_timedelta64_dtype(vals)
(hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
table = hash_klass(len(vals))
@@ -161,6 +163,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1):
if is_datetime:
uniques = uniques.astype('M8[ns]')
+ elif is_timedelta:
+ uniques = uniques.astype('m8[ns]')
if isinstance(values, Index):
uniques = values._simple_new(uniques, None, freq=getattr(values, 'freq', None),
tz=getattr(values, 'tz', None))
@@ -401,7 +405,8 @@ def _get_data_algo(values, func_map):
if com.is_float_dtype(values):
f = func_map['float64']
values = com._ensure_float64(values)
- elif com.is_datetime64_dtype(values):
+
+ elif com.needs_i8_conversion(values):
# if we have NaT, punt to object dtype
mask = com.isnull(values)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4e8228f3d8631..36cf3d9c7407c 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -161,7 +161,9 @@ def f(self, *args, **kwargs):
else:
f = _create_delegator_method(name)
- setattr(cls,name,f)
+ # don't overwrite existing methods/properties
+ if not hasattr(cls, name):
+ setattr(cls,name,f)
class FrozenList(PandasObject, list):
@@ -539,218 +541,3 @@ def duplicated(self, take_last=False):
def _update_inplace(self, result):
raise NotImplementedError
-
-class DatetimeIndexOpsMixin(object):
- """ common ops mixin to support a unified inteface datetimelike Index """
-
- def __iter__(self):
- return (self._box_func(v) for v in self.asi8)
-
- @property
- def _box_func(self):
- """
- box function to get object from internal representation
- """
- raise NotImplementedError
-
- def _box_values(self, values):
- """
- apply box func to passed values
- """
- return lib.map_infer(values, self._box_func)
-
- @cache_readonly
- def hasnans(self):
- """ return if I have any nans; enables various perf speedups """
- return (self.asi8 == tslib.iNaT).any()
-
- @property
- def asobject(self):
- from pandas.core.index import Index
- return Index(self._box_values(self.asi8), name=self.name, dtype=object)
-
- def tolist(self):
- """
- return a list of the underlying data
- """
- return list(self.asobject)
-
- def min(self, axis=None):
- """
- return the minimum value of the Index
-
- See also
- --------
- numpy.ndarray.min
- """
- try:
- i8 = self.asi8
-
- # quick check
- if len(i8) and self.is_monotonic:
- if i8[0] != tslib.iNaT:
- return self._box_func(i8[0])
-
- if self.hasnans:
- mask = i8 == tslib.iNaT
- min_stamp = self[~mask].asi8.min()
- else:
- min_stamp = i8.min()
- return self._box_func(min_stamp)
- except ValueError:
- return self._na_value
-
- def argmin(self, axis=None):
- """
- return a ndarray of the minimum argument indexer
-
- See also
- --------
- numpy.ndarray.argmin
- """
-
- i8 = self.asi8
- if self.hasnans:
- mask = i8 == tslib.iNaT
- if mask.all():
- return -1
- i8 = i8.copy()
- i8[mask] = np.iinfo('int64').max
- return i8.argmin()
-
- def max(self, axis=None):
- """
- return the maximum value of the Index
-
- See also
- --------
- numpy.ndarray.max
- """
- try:
- i8 = self.asi8
-
- # quick check
- if len(i8) and self.is_monotonic:
- if i8[-1] != tslib.iNaT:
- return self._box_func(i8[-1])
-
- if self.hasnans:
- mask = i8 == tslib.iNaT
- max_stamp = self[~mask].asi8.max()
- else:
- max_stamp = i8.max()
- return self._box_func(max_stamp)
- except ValueError:
- return self._na_value
-
- def argmax(self, axis=None):
- """
- return a ndarray of the maximum argument indexer
-
- See also
- --------
- numpy.ndarray.argmax
- """
-
- i8 = self.asi8
- if self.hasnans:
- mask = i8 == tslib.iNaT
- if mask.all():
- return -1
- i8 = i8.copy()
- i8[mask] = 0
- return i8.argmax()
-
- @property
- def _formatter_func(self):
- """
- Format function to convert value to representation
- """
- return str
-
- def _format_footer(self):
- tagline = 'Length: %d, Freq: %s, Timezone: %s'
- return tagline % (len(self), self.freqstr, self.tz)
-
- def __unicode__(self):
- formatter = self._formatter_func
- summary = str(self.__class__) + '\n'
-
- n = len(self)
- if n == 0:
- pass
- elif n == 1:
- first = formatter(self[0])
- summary += '[%s]\n' % first
- elif n == 2:
- first = formatter(self[0])
- last = formatter(self[-1])
- summary += '[%s, %s]\n' % (first, last)
- else:
- first = formatter(self[0])
- last = formatter(self[-1])
- summary += '[%s, ..., %s]\n' % (first, last)
-
- summary += self._format_footer()
- return summary
-
- @cache_readonly
- def _resolution(self):
- from pandas.tseries.frequencies import Resolution
- return Resolution.get_reso_from_freq(self.freqstr)
-
- @cache_readonly
- def resolution(self):
- """
- Returns day, hour, minute, second, millisecond or microsecond
- """
- from pandas.tseries.frequencies import get_reso_string
- return get_reso_string(self._resolution)
-
- def __add__(self, other):
- from pandas.core.index import Index
- from pandas.tseries.offsets import DateOffset
- if isinstance(other, Index):
- warnings.warn("using '+' to provide set union with Indexes is deprecated, "
- "use .union()",FutureWarning)
- return self.union(other)
- if isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)):
- return self._add_delta(other)
- elif com.is_integer(other):
- return self.shift(other)
- else: # pragma: no cover
- return NotImplemented
-
- def __sub__(self, other):
- from pandas.core.index import Index
- from pandas.tseries.offsets import DateOffset
- if isinstance(other, Index):
- warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
- "use .difference()",FutureWarning)
- return self.difference(other)
- if isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)):
- return self._add_delta(-other)
- elif com.is_integer(other):
- return self.shift(-other)
- else: # pragma: no cover
- return NotImplemented
-
- __iadd__ = __add__
- __isub__ = __sub__
-
- def _add_delta(self, other):
- return NotImplemented
-
- def unique(self):
- """
- Index.unique with handling for DatetimeIndex/PeriodIndex metadata
-
- Returns
- -------
- result : DatetimeIndex or PeriodIndex
- """
- from pandas.core.index import Int64Index
- result = Int64Index.unique(self)
- return self._simple_new(result, name=self.name, freq=self.freq,
- tz=getattr(self, 'tz', None))
-
diff --git a/pandas/core/common.py b/pandas/core/common.py
index ff9da5d401850..3695bc1898091 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -64,6 +64,13 @@ def _check(cls, inst):
return meta(name, tuple(), dct)
+ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index",))
+ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index",))
+ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ", ("float64index",))
+ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",))
+ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",))
+ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",))
+ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",))
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
@@ -879,7 +886,6 @@ def func(arr, indexer, out, fill_value=np.nan):
func(arr, indexer, out=out, fill_value=fill_value)
return out
-
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
@@ -889,24 +895,25 @@ def func(arr, indexer, out, fill_value=np.nan):
'int8': algos.diff_2d_int8,
}
-
def diff(arr, n, axis=0):
""" difference of n between self,
analagoust to s-s.shift(n) """
n = int(n)
- dtype = arr.dtype
na = np.nan
-
- if is_timedelta64_dtype(arr) or is_datetime64_dtype(arr):
- dtype = 'timedelta64[ns]'
+ dtype = arr.dtype
+ is_timedelta = False
+ if needs_i8_conversion(arr):
+ dtype = np.float64
arr = arr.view('i8')
na = tslib.iNaT
+ is_timedelta = True
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif issubclass(dtype.type, np.bool_):
dtype = np.object_
+ dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
@@ -927,7 +934,7 @@ def diff(arr, n, axis=0):
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
- if dtype == 'timedelta64[ns]':
+ if is_timedelta:
res = arr[res_indexer]
lag = arr[lag_indexer]
@@ -944,6 +951,9 @@ def diff(arr, n, axis=0):
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
+ if is_timedelta:
+ out_arr = lib.map_infer(out_arr.ravel(),lib.Timedelta).reshape(out_arr.shape)
+
return out_arr
@@ -1780,7 +1790,7 @@ def _maybe_box_datetimelike(value):
if isinstance(value, np.datetime64):
value = tslib.Timestamp(value)
elif isinstance(value, np.timedelta64):
- pass
+ value = tslib.Timedelta(value)
return value
@@ -2335,6 +2345,14 @@ def is_period_arraylike(arr):
return arr.dtype == object and lib.infer_dtype(arr) == 'period'
return getattr(arr, 'inferred_type', None) == 'period'
+def is_datetime_arraylike(arr):
+ """ return if we are datetime arraylike / DatetimeIndex """
+ if isinstance(arr, pd.DatetimeIndex):
+ return True
+ elif isinstance(arr, (np.ndarray, ABCSeries)):
+ return arr.dtype == object and lib.infer_dtype(arr) == 'datetime'
+ return getattr(arr, 'inferred_type', None) == 'datetime'
+
def _coerce_to_dtype(dtype):
""" coerce a string / np.dtype to a dtype """
if is_categorical_dtype(dtype):
@@ -2406,6 +2424,13 @@ def _is_datetime_or_timedelta_dtype(arr_or_dtype):
needs_i8_conversion = _is_datetime_or_timedelta_dtype
+def i8_boxer(arr_or_dtype):
+ """ return the scalar boxer for the dtype """
+ if is_datetime64_dtype(arr_or_dtype):
+ return lib.Timestamp
+ elif is_timedelta64_dtype(arr_or_dtype):
+ return lambda x: lib.Timedelta(x,unit='ns')
+ raise ValueError("cannot find a scalar boxer for {0}".format(arr_or_dtype))
def is_numeric_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
@@ -2523,7 +2548,7 @@ def _astype_nansafe(arr, dtype, copy=True):
if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
- return arr.astype(object)
+ return tslib.ints_to_pytimedelta(arr.view(np.int64))
# in py3, timedelta64[ns] are int64
elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
@@ -2745,27 +2770,38 @@ def _concat_compat(to_concat, axis=0):
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
if nonempty:
+
is_datetime64 = [x.dtype == _NS_DTYPE for x in nonempty]
+ is_timedelta64 = [x.dtype == _TD_DTYPE for x in nonempty]
+
if all(is_datetime64):
- # work around NumPy 1.6 bug
new_values = np.concatenate([x.view(np.int64) for x in nonempty],
axis=axis)
return new_values.view(_NS_DTYPE)
- elif any(is_datetime64):
+ elif all(is_timedelta64):
+ new_values = np.concatenate([x.view(np.int64) for x in nonempty],
+ axis=axis)
+ return new_values.view(_TD_DTYPE)
+ elif any(is_datetime64) or any(is_timedelta64):
to_concat = [_to_pydatetime(x) for x in nonempty]
return np.concatenate(to_concat, axis=axis)
def _to_pydatetime(x):
+ # coerce to an object dtype
+
if x.dtype == _NS_DTYPE:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel())
x = x.reshape(shape)
+ elif x.dtype == _TD_DTYPE:
+ shape = x.shape
+ x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel())
+ x = x.reshape(shape)
return x
-
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 2658410358000..190eb2dc3bbda 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -16,7 +16,7 @@
from pandas.core.config import get_option, set_option, reset_option
import pandas.core.common as com
import pandas.lib as lib
-from pandas.tslib import iNaT
+from pandas.tslib import iNaT, Timestamp, Timedelta
import numpy as np
@@ -1230,10 +1230,10 @@ def _helper_csv(self, writer, na_rep=None, cols=None,
writer.writerow(encoded_cols)
if date_format is None:
- date_formatter = lambda x: lib.Timestamp(x)._repr_base
+ date_formatter = lambda x: Timestamp(x)._repr_base
else:
def strftime_with_nulls(x):
- x = lib.Timestamp(x)
+ x = Timestamp(x)
if notnull(x):
return x.strftime(date_format)
@@ -1273,7 +1273,7 @@ def strftime_with_nulls(x):
if float_format is not None and com.is_float(val):
val = float_format % val
- elif isinstance(val, (np.datetime64, lib.Timestamp)):
+ elif isinstance(val, (np.datetime64, Timestamp)):
val = date_formatter(val)
row_fields.append(val)
@@ -1922,8 +1922,8 @@ def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or lib.checknull(x):
return nat_rep
- if tz is not None or not isinstance(x, lib.Timestamp):
- x = lib.Timestamp(x, tz=tz)
+ if tz is not None or not isinstance(x, Timestamp):
+ x = Timestamp(x, tz=tz)
return str(x)
@@ -1932,8 +1932,8 @@ def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or lib.checknull(x):
return nat_rep
- if not isinstance(x, lib.Timestamp):
- x = lib.Timestamp(x)
+ if not isinstance(x, Timestamp):
+ x = Timestamp(x)
if date_format:
return x.strftime(date_format)
@@ -1944,7 +1944,7 @@ def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
def _is_dates_only(values):
for d in values:
if isinstance(d, np.datetime64):
- d = lib.Timestamp(d)
+ d = Timestamp(d)
if d is not None and not lib.checknull(d) and d._has_time_component():
return False
@@ -1972,15 +1972,24 @@ def _get_format_datetime64_from_values(values,
class Timedelta64Formatter(GenericArrayFormatter):
- def _format_strings(self):
- formatter = self.formatter or _get_format_timedelta64(self.values)
+ def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
+ super(Timedelta64Formatter, self).__init__(values, **kwargs)
+ self.nat_rep = nat_rep
+ self.box = box
+ def _format_strings(self):
+ formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep, box=self.box)
fmt_values = [formatter(x) for x in self.values]
-
return fmt_values
-def _get_format_timedelta64(values):
+def _get_format_timedelta64(values, nat_rep='NaT', box=False):
+ """
+ return a formatter function for a range of timedeltas. These will all have the same format argument
+
+ if box, then show the return in quotes
+ """
+
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
@@ -1989,19 +1998,25 @@ def _get_format_timedelta64(values):
even_days = np.logical_and(consider_values, values_int % one_day_in_nanos != 0).sum() == 0
all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_in_nanos).sum() == 0
- format_short = even_days or all_sub_day
- format = "short" if format_short else "long"
+ if even_days:
+ format = 'even_day'
+ elif all_sub_day:
+ format = 'sub_day'
+ else:
+ format = 'long'
- def impl(x):
+ def _formatter(x):
if x is None or lib.checknull(x):
- return 'NaT'
- elif format_short and com.is_integer(x) and x.view('int64') == 0:
- return "0 days" if even_days else "00:00:00"
- else:
- return lib.repr_timedelta64(x, format=format)
+ return nat_rep
- return impl
+ if not isinstance(x, Timedelta):
+ x = Timedelta(x)
+ result = x._repr_base(format=format)
+ if box:
+ result = "'{0}'".format(result)
+ return result
+ return _formatter
def _make_fixed_width(strings, justify='right', minimum=None):
if len(strings) == 0 or justify == 'all':
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 961e488026731..9140ef25019db 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -11,7 +11,7 @@
import pandas.lib as lib
import pandas.algos as _algos
import pandas.index as _index
-from pandas.lib import Timestamp, is_datetime_array
+from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs
from pandas.util.decorators import Appender, cache_readonly, deprecate
from pandas.core.common import isnull, array_equivalent
@@ -136,7 +136,12 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
else:
return result
elif issubclass(data.dtype.type, np.timedelta64):
- return Int64Index(data, copy=copy, name=name)
+ from pandas.tseries.tdi import TimedeltaIndex
+ result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
+ if dtype is not None and _o_dtype == dtype:
+ return Index(result.to_pytimedelta(), dtype=_o_dtype)
+ else:
+ return result
if dtype is not None:
try:
@@ -196,6 +201,10 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
tslib.is_timestamp_array(subarr)):
from pandas.tseries.index import DatetimeIndex
return DatetimeIndex(subarr, copy=copy, name=name, **kwargs)
+ elif (inferred.startswith('timedelta') or
+ lib.is_timedelta_array(subarr)):
+ from pandas.tseries.tdi import TimedeltaIndex
+ return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs)
elif inferred == 'period':
return PeriodIndex(subarr, name=name, **kwargs)
@@ -398,27 +407,25 @@ def __unicode__(self):
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
- def to_series(self, keep_tz=False):
+ def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
- Parameters
- ----------
- keep_tz : optional, defaults False.
- applies only to a DatetimeIndex
-
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
- import pandas as pd
- values = self._to_embed(keep_tz)
- return pd.Series(values, index=self, name=self.name)
+ from pandas import Series
+ return Series(self._to_embed(), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
- """ return an array repr of this object, potentially casting to object """
+ """
+ return an array repr of this object, potentially casting to object
+
+ This is for internal compat
+ """
return self.values
def astype(self, dtype):
@@ -931,8 +938,8 @@ def append(self, other):
@staticmethod
def _ensure_compat_concat(indexes):
- from pandas.tseries.api import DatetimeIndex, PeriodIndex
- klasses = DatetimeIndex, PeriodIndex
+ from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
+ klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
is_ts = [isinstance(idx, klasses) for idx in indexes]
@@ -2043,6 +2050,13 @@ def drop_duplicates(self, take_last=False):
def duplicated(self, take_last=False):
return super(Index, self).duplicated(take_last=take_last)
+
+ def _evaluate_with_timedelta_like(self, other, op, opstr):
+ raise TypeError("can only perform ops with timedelta like values")
+
+ def _evaluate_with_datetime_like(self, other, op, opstr):
+ raise TypeError("can only perform ops with datetime like values")
+
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable """
@@ -2054,11 +2068,15 @@ def _invalid_op(self, other):
typ=type(self)))
return _invalid_op
- cls.__mul__ = cls.__rmul__ = _make_invalid_op('multiplication')
- cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('floor division')
- cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('true division')
+ cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
+ cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
+ cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
- cls.__div__ = cls.__rdiv__ = _make_invalid_op('division')
+ cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
+ cls.__neg__ = _make_invalid_op('__neg__')
+ cls.__pos__ = _make_invalid_op('__pos__')
+ cls.__abs__ = _make_invalid_op('__abs__')
+ cls.__inv__ = _make_invalid_op('__inv__')
@classmethod
def _add_numeric_methods(cls):
@@ -2067,6 +2085,7 @@ def _add_numeric_methods(cls):
def _make_evaluate_binop(op, opstr):
def _evaluate_numeric_binop(self, other):
+ import pandas.tseries.offsets as offsets
# if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodInde)
if not self._is_numeric_dtype:
@@ -2086,6 +2105,10 @@ def _evaluate_numeric_binop(self, other):
other = _values_from_object(other)
if other.dtype.kind not in ['f','i']:
raise TypeError("cannot evaluate a numeric op with a non-numeric dtype")
+ elif isinstance(other, (offsets.DateOffset, np.timedelta64, Timedelta, datetime.timedelta)):
+ return self._evaluate_with_timedelta_like(other, op, opstr)
+ elif isinstance(other, (Timestamp, np.datetime64)):
+ return self._evaluate_with_datetime_like(other, op, opstr)
else:
if not (com.is_float(other) or com.is_integer(other)):
raise TypeError("can only perform ops with scalar values")
@@ -2093,12 +2116,29 @@ def _evaluate_numeric_binop(self, other):
return _evaluate_numeric_binop
+ def _make_evaluate_unary(op, opstr):
+
+ def _evaluate_numeric_unary(self):
+
+ # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
+ if not self._is_numeric_dtype:
+ raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
+ typ=type(self)))
+
+ return self._shallow_copy(op(self.values))
- cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(operator.mul,'multiplication')
- cls.__floordiv__ = cls.__rfloordiv__ = _make_evaluate_binop(operator.floordiv,'floor division')
- cls.__truediv__ = cls.__rtruediv__ = _make_evaluate_binop(operator.truediv,'true division')
+ return _evaluate_numeric_unary
+
+ cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(operator.mul,'__mul__')
+ cls.__floordiv__ = cls.__rfloordiv__ = _make_evaluate_binop(operator.floordiv,'__floordiv__')
+ cls.__truediv__ = cls.__rtruediv__ = _make_evaluate_binop(operator.truediv,'__truediv__')
if not compat.PY3:
- cls.__div__ = cls.__rdiv__ = _make_evaluate_binop(operator.div,'division')
+ cls.__div__ = cls.__rdiv__ = _make_evaluate_binop(operator.div,'__div__')
+ cls.__neg__ = _make_evaluate_unary(lambda x: -x,'__neg__')
+ cls.__pos__ = _make_evaluate_unary(lambda x: x,'__pos__')
+ cls.__abs__ = _make_evaluate_unary(lambda x: np.abs(x),'__abs__')
+ cls.__inv__ = _make_evaluate_unary(lambda x: -x,'__inv__')
+
Index._add_numeric_methods_disabled()
class NumericIndex(Index):
@@ -4490,8 +4530,8 @@ def _get_consensus_names(indexes):
def _maybe_box(idx):
- from pandas.tseries.api import DatetimeIndex, PeriodIndex
- klasses = DatetimeIndex, PeriodIndex
+ from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
+ klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
if isinstance(idx, klasses):
return idx.asobject
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6672546fb4bad..95c82cc0233a4 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -24,7 +24,7 @@
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
-from pandas.tslib import Timestamp
+from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
@@ -357,6 +357,9 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
return self.copy()
return self
+ if klass is None:
+ if dtype == np.object_:
+ klass = ObjectBlock
try:
# force the copy here
if values is None:
@@ -1232,6 +1235,8 @@ def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
+ elif isinstance(value, Timedelta):
+ value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
@@ -1257,8 +1262,8 @@ def masker(v):
if _is_null_datelike_scalar(other):
other = np.nan
- elif isinstance(other, np.timedelta64):
- other = _coerce_scalar_to_timedelta_type(other, unit='s').item()
+ elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
+ other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
else:
@@ -1278,7 +1283,7 @@ def _try_coerce_result(self, result):
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
- result = np.timedelta64(result)
+ result = lib.Timedelta(result)
return result
def should_store(self, value):
@@ -1297,17 +1302,21 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs):
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
- rvalues.flat[imask] = np.array([lib.repr_timedelta64(val)
+
+ #### FIXME ####
+ # should use the core.format.Timedelta64Formatter here
+ # to figure what format to pass to the Timedelta
+ # e.g. to not show the decimals say
+ rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues.tolist()
def get_values(self, dtype=None):
- # return object dtypes as datetime.timedeltas
+ # return object dtypes as Timedelta
if dtype == object:
- return lib.map_infer(self.values.ravel(),
- lambda x: timedelta(microseconds=x.item() / 1000)
+ return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
@@ -1816,16 +1825,6 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None,
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
- def astype(self, dtype, copy=False, raise_on_error=True):
- """
- handle convert to object as a special case
- """
- klass = None
- if np.dtype(dtype).type == np.object_:
- klass = ObjectBlock
- return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
- klass=klass)
-
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index aa6140383a27a..163ae0ee5a199 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -72,6 +72,10 @@ def f(values, axis=None, skipna=True, **kwds):
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
+
+ # wrap the 0's if needed
+ if is_timedelta64_dtype(values):
+ return lib.Timedelta(0)
return 0
else:
result_shape = (values.shape[:axis] +
@@ -222,17 +226,7 @@ def _wrap_results(result, dtype):
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
-
- # this is a scalar timedelta result!
- # we have series convert then take the element (scalar)
- # as series will do the right thing in py3 (and deal with numpy
- # 1.6.2 bug in that it results dtype of timedelta64[us]
- from pandas import Series
-
- # coerce float to results
- if is_float(result):
- result = int(result)
- result = Series([result], dtype='timedelta64[ns]')
+ result = lib.Timedelta(result)
else:
result = result.view(dtype)
@@ -314,7 +308,7 @@ def get_median(x):
return ret
# otherwise return a scalar value
- return _wrap_results(get_median(values), dtype) if notempty else np.nan
+ return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof):
@@ -709,6 +703,10 @@ def unique1d(values):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('M8[ns]')
+ elif np.issubdtype(values.dtype, np.timedelta64):
+ table = _hash.Int64HashTable(len(values))
+ uniques = table.unique(_ensure_int64(values))
+ uniques = uniques.view('m8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 7efcfb9898053..cad49aa68a250 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -575,7 +575,7 @@ def wrapper(self, other):
values = self.get_values()
other = _index.convert_scalar(values,_values_from_object(other))
- if issubclass(values.dtype.type, np.datetime64):
+ if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
values = values.view('i8')
# scalars
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 519e4c4457f04..4137b58885802 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -28,6 +28,7 @@
from pandas.core.internals import SingleBlockManager
from pandas.core.categorical import Categorical
from pandas.tseries.index import DatetimeIndex
+from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex, Period
from pandas import compat
from pandas.util.terminal import get_terminal_size
@@ -248,9 +249,7 @@ def _set_axis(self, axis, labels, fastpath=False):
is_all_dates = labels.is_all_dates
if is_all_dates:
- from pandas.tseries.index import DatetimeIndex
- from pandas.tseries.period import PeriodIndex
- if not isinstance(labels, (DatetimeIndex, PeriodIndex)):
+ if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
labels = DatetimeIndex(labels)
# need to set here becuase we changed the index
@@ -1003,6 +1002,8 @@ def __iter__(self):
return iter(self.values)
elif np.issubdtype(self.dtype, np.datetime64):
return (lib.Timestamp(x) for x in self.values)
+ elif np.issubdtype(self.dtype, np.timedelta64):
+ return (lib.Timedelta(x) for x in self.values)
else:
return iter(self.values)
@@ -1242,9 +1243,7 @@ def quantile(self, q=0.5):
0.75 3.25
dtype: float64
"""
- valid_values = self.dropna().values
- if len(valid_values) == 0:
- return pa.NA
+ valid = self.dropna()
def multi(values, qs):
if com.is_list_like(qs):
@@ -1253,17 +1252,7 @@ def multi(values, qs):
else:
return _quantile(values, qs*100)
- if com.is_datetime64_dtype(self):
- values = _values_from_object(self).view('i8')
- result = multi(values, q)
- if com.is_list_like(q):
- result = result.map(lib.Timestamp)
- else:
- result = lib.Timestamp(result)
- else:
- result = multi(valid_values, q)
-
- return result
+ return self._maybe_box(lambda values: multi(values, q), dropna=True)
def ptp(self, axis=None, out=None):
return _values_from_object(self).ptp(axis, out)
@@ -2016,9 +2005,49 @@ def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
delegate = self.values
if isinstance(delegate, np.ndarray):
return op(delegate, skipna=skipna, **kwds)
+
return delegate._reduce(op=op, axis=axis, skipna=skipna, numeric_only=numeric_only,
filter_type=filter_type, name=name, **kwds)
+ def _maybe_box(self, func, dropna=False):
+ """
+ evaluate a function with possible input/output conversion if we are i8
+
+ Parameters
+ ----------
+ dropna : bool, default False
+ whether to drop values if necessary
+
+ """
+ if dropna:
+ values = self.dropna().values
+ else:
+ values = self.values
+
+ if com.needs_i8_conversion(self):
+ boxer = com.i8_boxer(self)
+
+ if len(values) == 0:
+ return boxer(iNaT)
+
+ values = values.view('i8')
+ result = func(values)
+
+ if com.is_list_like(result):
+ result = result.map(boxer)
+ else:
+ result = boxer(result)
+
+ else:
+
+ # let the function return nan if appropriate
+ if dropna:
+ if len(values) == 0:
+ return np.nan
+ result = func(values)
+
+ return result
+
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
@@ -2446,6 +2475,11 @@ def _sanitize_index(data, index, copy=False):
data = data._to_embed(keep_tz=True)
if copy:
data = data.copy()
+ elif isinstance(data, np.ndarray):
+
+ # coerce datetimelike types
+ if data.dtype.kind in ['M','m']:
+ data = _sanitize_array(data, index, copy=copy)
return data
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 3dcdbf207fb3f..d6e358a96e904 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -1,7 +1,7 @@
from numpy cimport ndarray
from numpy cimport (float64_t, int32_t, int64_t, uint8_t,
- NPY_DATETIME)
+ NPY_DATETIME, NPY_TIMEDELTA)
cimport cython
cimport numpy as cnp
@@ -16,7 +16,7 @@ import numpy as np
cimport tslib
from hashtable cimport *
from pandas import algos, tslib, hashtable as _hash
-from pandas.tslib import Timestamp
+from pandas.tslib import Timestamp, Timedelta
from datetime cimport (get_datetime64_value, _pydatetime_to_dts,
pandas_datetimestruct)
@@ -57,6 +57,8 @@ cdef inline is_definitely_invalid_key(object val):
def get_value_at(ndarray arr, object loc):
if arr.descr.type_num == NPY_DATETIME:
return Timestamp(util.get_value_at(arr, loc))
+ elif arr.descr.type_num == NPY_TIMEDELTA:
+ return Timedelta(util.get_value_at(arr, loc))
return util.get_value_at(arr, loc)
def set_value_at(ndarray arr, object loc, object val):
@@ -108,6 +110,8 @@ cdef class IndexEngine:
else:
if arr.descr.type_num == NPY_DATETIME:
return Timestamp(util.get_value_at(arr, loc))
+ elif arr.descr.type_num == NPY_TIMEDELTA:
+ return Timedelta(util.get_value_at(arr, loc))
return util.get_value_at(arr, loc)
cpdef set_value(self, ndarray arr, object key, object value):
@@ -498,6 +502,9 @@ cdef class ObjectEngine(IndexEngine):
cdef class DatetimeEngine(Int64Engine):
+ cdef _get_box_dtype(self):
+ return 'M8[ns]'
+
def __contains__(self, object val):
if self.over_size_threshold and self.is_monotonic:
if not self.is_unique:
@@ -559,26 +566,31 @@ cdef class DatetimeEngine(Int64Engine):
def get_indexer(self, values):
self._ensure_mapping_populated()
- if values.dtype != 'M8[ns]':
+ if values.dtype != self._get_box_dtype():
return np.repeat(-1, len(values)).astype('i4')
values = np.asarray(values).view('i8')
return self.mapping.lookup(values)
def get_pad_indexer(self, other, limit=None):
- if other.dtype != 'M8[ns]':
+ if other.dtype != self._get_box_dtype():
return np.repeat(-1, len(other)).astype('i4')
other = np.asarray(other).view('i8')
return algos.pad_int64(self._get_index_values(), other,
limit=limit)
def get_backfill_indexer(self, other, limit=None):
- if other.dtype != 'M8[ns]':
+ if other.dtype != self._get_box_dtype():
return np.repeat(-1, len(other)).astype('i4')
other = np.asarray(other).view('i8')
return algos.backfill_int64(self._get_index_values(), other,
limit=limit)
+cdef class TimedeltaEngine(DatetimeEngine):
+
+ cdef _get_box_dtype(self):
+ return 'm8[ns]'
+
cpdef convert_scalar(ndarray arr, object value):
if arr.descr.type_num == NPY_DATETIME:
if isinstance(value,np.ndarray):
@@ -589,6 +601,15 @@ cpdef convert_scalar(ndarray arr, object value):
return iNaT
else:
return Timestamp(value).value
+ elif arr.descr.type_num == NPY_TIMEDELTA:
+ if isinstance(value,np.ndarray):
+ pass
+ elif isinstance(value, Timedelta):
+ return value.value
+ elif value is None or value != value:
+ return iNaT
+ else:
+ return Timedelta(value).value
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
if util.is_float_object(value) and value != value:
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 53ddd5c42a1d7..4a4b9da619b5f 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -325,7 +325,7 @@ def _execute_sql(self):
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
- df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
+ df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
@@ -523,6 +523,7 @@ def test_date_and_index(self):
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
+
# see #6921
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
@@ -1067,7 +1068,7 @@ def _get_index_columns(self, tbl_name):
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
-
+
def test_to_sql_save_index(self):
self._to_sql_save_index()
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 07b1efcd834db..7a90072b2410e 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -46,7 +46,7 @@ from datetime cimport *
from tslib cimport convert_to_tsobject, convert_to_timedelta64
import tslib
-from tslib import NaT, Timestamp, repr_timedelta64
+from tslib import NaT, Timestamp, Timedelta
cdef int64_t NPY_NAT = util.get_nat()
@@ -235,7 +235,7 @@ cpdef checknull_old(object val):
return util._checknull(val)
def isscalar(object val):
- return np.isscalar(val) or val is None or PyDateTime_Check(val)
+ return np.isscalar(val) or val is None or PyDateTime_Check(val) or PyDelta_Check(val)
@cython.wraparound(False)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index dd7bc41c8d62c..f508b8915da1c 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -5,10 +5,11 @@
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
-from pandas.core.base import FrozenList, FrozenNDArray, PandasDelegate, DatetimeIndexOpsMixin
+from pandas.core.base import FrozenList, FrozenNDArray, PandasDelegate
+from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas.tseries.common import is_datetimelike
-from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
+from pandas import Series, Index, Int64Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, Timedelta
import pandas.tslib as tslib
import nose
@@ -519,25 +520,21 @@ def test_value_counts_inferred(self):
td = klass(td)
result = td.value_counts()
- expected_s = Series([6], index=[86400000000000])
- self.assertEqual(result.index.dtype, 'int64')
+ expected_s = Series([6], index=[Timedelta('1day')])
tm.assert_series_equal(result, expected_s)
- # get nanoseconds to compare
- expected = np.array([86400000000000])
- self.assert_numpy_array_equal(td.unique(), expected)
- self.assertEqual(td.nunique(), 1)
+ expected = TimedeltaIndex(['1 days'])
+ if isinstance(td, TimedeltaIndex):
+ self.assertTrue(td.unique().equals(expected))
+ else:
+ self.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2)
result2 = td2.value_counts()
- self.assertEqual(result2.index.dtype, 'int64')
tm.assert_series_equal(result2, expected_s)
- self.assert_numpy_array_equal(td.unique(), expected)
- self.assertEqual(td.nunique(), 1)
-
def test_factorize(self):
for o in self.objs:
exp_arr = np.array(range(len(o)))
@@ -637,632 +634,6 @@ def test_duplicated_drop_duplicates(self):
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
-
-class TestDatetimeIndexOps(Ops):
- tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
- 'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
-
- def setUp(self):
- super(TestDatetimeIndexOps, self).setUp()
- mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex) or is_datetimelike(x)
- self.is_valid_objs = [ o for o in self.objs if mask(o) ]
- self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
-
- def test_ops_properties(self):
- self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
- self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
- 'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
-
- def test_ops_properties_basic(self):
-
- # sanity check that the behavior didn't change
- # GH7206
- for op in ['year','day','second','weekday']:
- self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
-
- # attribute access should still work!
- s = Series(dict(year=2000,month=1,day=10))
- self.assertEquals(s.year,2000)
- self.assertEquals(s.month,1)
- self.assertEquals(s.day,10)
- self.assertRaises(AttributeError, lambda : s.weekday)
-
- def test_asobject_tolist(self):
- idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
- expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
- pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
- expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
- self.assertTrue(isinstance(result, Index))
- self.assertEqual(result.dtype, object)
- self.assertTrue(result.equals(expected))
- self.assertEqual(result.name, expected.name)
- self.assertEqual(idx.tolist(), expected_list)
-
- idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
- expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
- pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
- pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
- pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
- expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
- self.assertTrue(isinstance(result, Index))
- self.assertEqual(result.dtype, object)
- self.assertTrue(result.equals(expected))
- self.assertEqual(result.name, expected.name)
- self.assertEqual(idx.tolist(), expected_list)
-
- idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
- pd.NaT, datetime(2013, 1, 4)], name='idx')
- expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
- pd.NaT, pd.Timestamp('2013-01-04')]
- expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
- self.assertTrue(isinstance(result, Index))
- self.assertEqual(result.dtype, object)
- self.assertTrue(result.equals(expected))
- self.assertEqual(result.name, expected.name)
- self.assertEqual(idx.tolist(), expected_list)
-
- def test_minmax(self):
- for tz in self.tz:
- # monotonic
- idx1 = pd.DatetimeIndex([pd.NaT, '2011-01-01', '2011-01-02',
- '2011-01-03'], tz=tz)
- self.assertTrue(idx1.is_monotonic)
-
- # non-monotonic
- idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
- '2011-01-02', pd.NaT], tz=tz)
- self.assertFalse(idx2.is_monotonic)
-
- for idx in [idx1, idx2]:
- self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
- self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
-
- for op in ['min', 'max']:
- # Return NaT
- obj = DatetimeIndex([])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
-
- obj = DatetimeIndex([pd.NaT])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
-
- obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
-
- def test_representation(self):
- idx1 = DatetimeIndex([], freq='D')
- idx2 = DatetimeIndex(['2011-01-01'], freq='D')
- idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
- idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
- freq='H', tz='Asia/Tokyo')
- idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
- tz='US/Eastern')
-
- exp1 = """<class 'pandas.tseries.index.DatetimeIndex'>
-Length: 0, Freq: D, Timezone: None"""
- exp2 = """<class 'pandas.tseries.index.DatetimeIndex'>
-[2011-01-01]
-Length: 1, Freq: D, Timezone: None"""
- exp3 = """<class 'pandas.tseries.index.DatetimeIndex'>
-[2011-01-01, 2011-01-02]
-Length: 2, Freq: D, Timezone: None"""
- exp4 = """<class 'pandas.tseries.index.DatetimeIndex'>
-[2011-01-01, ..., 2011-01-03]
-Length: 3, Freq: D, Timezone: None"""
- exp5 = """<class 'pandas.tseries.index.DatetimeIndex'>
-[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00]
-Length: 3, Freq: H, Timezone: Asia/Tokyo"""
- exp6 = """<class 'pandas.tseries.index.DatetimeIndex'>
-[2011-01-01 09:00:00-05:00, ..., NaT]
-Length: 3, Freq: None, Timezone: US/Eastern"""
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
- [exp1, exp2, exp3, exp4, exp5, exp6]):
- for func in ['__repr__', '__unicode__', '__str__']:
- result = getattr(idx, func)()
- self.assertEqual(result, expected)
-
- def test_resolution(self):
- for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
- ['day', 'day', 'day', 'day',
- 'hour', 'minute', 'second', 'millisecond', 'microsecond']):
- for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
- idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
- self.assertEqual(idx.resolution, expected)
-
- def test_add_iadd(self):
- for tz in self.tz:
- # union
- rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
- expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
-
- rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
- expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
-
- rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other3 = pd.DatetimeIndex([], tz=tz)
- expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
-
- for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
- (rng3, other3, expected3)]:
- result_union = rng.union(other)
- tm.assert_index_equal(result_union, expected)
-
- # offset
- offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
-
- for delta in offsets:
- rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
- result = rng + delta
- expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
- tm.assert_index_equal(result, expected)
- rng += delta
- tm.assert_index_equal(rng, expected)
-
- # int
- rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
- result = rng + 1
- expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
- tm.assert_index_equal(result, expected)
- rng += 1
- tm.assert_index_equal(rng, expected)
-
- def test_sub_isub(self):
- for tz in self.tz:
- # diff
- rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
- expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
-
- rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
- expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
-
- rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other3 = pd.DatetimeIndex([], tz=tz)
- expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
-
- for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
- (rng3, other3, expected3)]:
- result_union = rng.difference(other)
- tm.assert_index_equal(result_union, expected)
-
- # offset
- offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
-
- for delta in offsets:
- rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
- result = rng - delta
- expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
- tm.assert_index_equal(result, expected)
- rng -= delta
- tm.assert_index_equal(rng, expected)
-
- # int
- rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
- result = rng - 1
- expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
- tm.assert_index_equal(result, expected)
- rng -= 1
- tm.assert_index_equal(rng, expected)
-
- def test_value_counts_unique(self):
- # GH 7735
- for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
- idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
- # create repeated values, 'n'th element is repeated by n+1 times
- idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
-
- exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
- expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
- tm.assert_series_equal(idx.value_counts(), expected)
-
- expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
- tm.assert_index_equal(idx.unique(), expected)
-
- idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
- '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
-
- exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
- expected = Series([3, 2], index=exp_idx)
- tm.assert_series_equal(idx.value_counts(), expected)
-
- exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
- expected = Series([3, 2, 1], index=exp_idx)
- tm.assert_series_equal(idx.value_counts(dropna=False), expected)
-
- tm.assert_index_equal(idx.unique(), exp_idx)
-
-
-class TestPeriodIndexOps(Ops):
-
- def setUp(self):
- super(TestPeriodIndexOps, self).setUp()
- mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex) or is_datetimelike(x)
- self.is_valid_objs = [ o for o in self.objs if mask(o) ]
- self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
-
- def test_ops_properties(self):
- self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
- self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
-
- def test_asobject_tolist(self):
- idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
- expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
- pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
- expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
- self.assertTrue(isinstance(result, Index))
- self.assertEqual(result.dtype, object)
- self.assertTrue(result.equals(expected))
- self.assertEqual(result.name, expected.name)
- self.assertEqual(idx.tolist(), expected_list)
-
- idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
- expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
- pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
- expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
- self.assertTrue(isinstance(result, Index))
- self.assertEqual(result.dtype, object)
- for i in [0, 1, 3]:
- self.assertTrue(result[i], expected[i])
- self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
- self.assertTrue(result[2].freq, 'D')
- self.assertEqual(result.name, expected.name)
-
- result_list = idx.tolist()
- for i in [0, 1, 3]:
- self.assertTrue(result_list[i], expected_list[i])
- self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
- self.assertTrue(result_list[2].freq, 'D')
-
- def test_minmax(self):
-
- # monotonic
- idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
- '2011-01-03'], freq='D')
- self.assertTrue(idx1.is_monotonic)
-
- # non-monotonic
- idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
- '2011-01-02', pd.NaT], freq='D')
- self.assertFalse(idx2.is_monotonic)
-
- for idx in [idx1, idx2]:
- self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
- self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
-
- for op in ['min', 'max']:
- # Return NaT
- obj = PeriodIndex([], freq='M')
- result = getattr(obj, op)()
- self.assertEqual(result.ordinal, tslib.iNaT)
- self.assertEqual(result.freq, 'M')
-
- obj = PeriodIndex([pd.NaT], freq='M')
- result = getattr(obj, op)()
- self.assertEqual(result.ordinal, tslib.iNaT)
- self.assertEqual(result.freq, 'M')
-
- obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
- result = getattr(obj, op)()
- self.assertEqual(result.ordinal, tslib.iNaT)
- self.assertEqual(result.freq, 'M')
-
- def test_representation(self):
- # GH 7601
- idx1 = PeriodIndex([], freq='D')
- idx2 = PeriodIndex(['2011-01-01'], freq='D')
- idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
- idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
- idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
-
- idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
- idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
- idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
-
- exp1 = """<class 'pandas.tseries.period.PeriodIndex'>
-Length: 0, Freq: D"""
- exp2 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2011-01-01]
-Length: 1, Freq: D"""
- exp3 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2011-01-01, 2011-01-02]
-Length: 2, Freq: D"""
- exp4 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2011-01-01, ..., 2011-01-03]
-Length: 3, Freq: D"""
- exp5 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2011, ..., 2013]
-Length: 3, Freq: A-DEC"""
- exp6 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2011-01-01 09:00, ..., NaT]
-Length: 3, Freq: H"""
- exp7 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2013Q1]
-Length: 1, Freq: Q-DEC"""
- exp8 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2013Q1, 2013Q2]
-Length: 2, Freq: Q-DEC"""
- exp9 = """<class 'pandas.tseries.period.PeriodIndex'>
-[2013Q1, ..., 2013Q3]
-Length: 3, Freq: Q-DEC"""
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
- [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
- for func in ['__repr__', '__unicode__', '__str__']:
- result = getattr(idx, func)()
- self.assertEqual(result, expected)
-
- def test_resolution(self):
- for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
- ['day', 'day', 'day', 'day',
- 'hour', 'minute', 'second', 'millisecond', 'microsecond']):
-
- idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
- self.assertEqual(idx.resolution, expected)
-
- def test_add_iadd(self):
- # union
- rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
- other1 = pd.period_range('1/6/2000', freq='D', periods=5)
- expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
-
- rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
- other2 = pd.period_range('1/4/2000', freq='D', periods=5)
- expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
-
- rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
- other3 = pd.PeriodIndex([], freq='D')
- expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
-
- rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
- other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
- expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
- '2000-01-01 11:00', '2000-01-01 12:00',
- '2000-01-01 13:00', '2000-01-02 09:00',
- '2000-01-02 10:00', '2000-01-02 11:00',
- '2000-01-02 12:00', '2000-01-02 13:00'],
- freq='H')
-
- rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
- '2000-01-01 09:05'], freq='T')
- other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'
- '2000-01-01 09:08'], freq='T')
- expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
- '2000-01-01 09:05', '2000-01-01 09:08'],
- freq='T')
-
- rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
- other6 = pd.period_range('2000-04-01', freq='M', periods=7)
- expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
-
- rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
- other7 = pd.period_range('1998-01-01', freq='A', periods=8)
- expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
-
- for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
- (rng3, other3, expected3), (rng4, other4, expected4),
- (rng5, other5, expected5), (rng6, other6, expected6),
- (rng7, other7, expected7)]:
-
- result_union = rng.union(other)
- tm.assert_index_equal(result_union, expected)
-
- # offset
- # DateOffset
- rng = pd.period_range('2014', '2024', freq='A')
- result = rng + pd.offsets.YearEnd(5)
- expected = pd.period_range('2019', '2029', freq='A')
- tm.assert_index_equal(result, expected)
- rng += pd.offsets.YearEnd(5)
- tm.assert_index_equal(rng, expected)
-
- for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
- np.timedelta64(365, 'D'), timedelta(365)]:
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng + o
-
- rng = pd.period_range('2014-01', '2016-12', freq='M')
- result = rng + pd.offsets.MonthEnd(5)
- expected = pd.period_range('2014-06', '2017-05', freq='M')
- tm.assert_index_equal(result, expected)
- rng += pd.offsets.MonthEnd(5)
- tm.assert_index_equal(rng, expected)
-
- for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
- np.timedelta64(365, 'D'), timedelta(365)]:
- rng = pd.period_range('2014-01', '2016-12', freq='M')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng + o
-
- # Tick
- offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
- pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
- for delta in offsets:
- rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
- result = rng + delta
- expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
- tm.assert_index_equal(result, expected)
- rng += delta
- tm.assert_index_equal(rng, expected)
-
- for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
- np.timedelta64(4, 'h'), timedelta(hours=23)]:
- rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng + o
-
- offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
- pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
- for delta in offsets:
- rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
- result = rng + delta
- expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
- tm.assert_index_equal(result, expected)
- rng += delta
- tm.assert_index_equal(rng, expected)
-
- for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
- rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- result = rng + delta
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng += delta
-
- # int
- rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
- result = rng + 1
- expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
- tm.assert_index_equal(result, expected)
- rng += 1
- tm.assert_index_equal(rng, expected)
-
- def test_sub_isub(self):
- # diff
- rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
- other1 = pd.period_range('1/6/2000', freq='D', periods=5)
- expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
-
- rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
- other2 = pd.period_range('1/4/2000', freq='D', periods=5)
- expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
-
- rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
- other3 = pd.PeriodIndex([], freq='D')
- expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
-
- rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
- other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
- expected4 = rng4
-
- rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
- '2000-01-01 09:05'], freq='T')
- other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
- expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
-
- rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
- other6 = pd.period_range('2000-04-01', freq='M', periods=7)
- expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
-
- rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
- other7 = pd.period_range('1998-01-01', freq='A', periods=8)
- expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
-
- for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
- (rng3, other3, expected3), (rng4, other4, expected4),
- (rng5, other5, expected5), (rng6, other6, expected6),
- (rng7, other7, expected7),]:
- result_union = rng.difference(other)
- tm.assert_index_equal(result_union, expected)
-
- # offset
- # DateOffset
- rng = pd.period_range('2014', '2024', freq='A')
- result = rng - pd.offsets.YearEnd(5)
- expected = pd.period_range('2009', '2019', freq='A')
- tm.assert_index_equal(result, expected)
- rng -= pd.offsets.YearEnd(5)
- tm.assert_index_equal(rng, expected)
-
- for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
- np.timedelta64(365, 'D'), timedelta(365)]:
- rng = pd.period_range('2014', '2024', freq='A')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng - o
-
- rng = pd.period_range('2014-01', '2016-12', freq='M')
- result = rng - pd.offsets.MonthEnd(5)
- expected = pd.period_range('2013-08', '2016-07', freq='M')
- tm.assert_index_equal(result, expected)
- rng -= pd.offsets.MonthEnd(5)
- tm.assert_index_equal(rng, expected)
-
- for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
- np.timedelta64(365, 'D'), timedelta(365)]:
- rng = pd.period_range('2014-01', '2016-12', freq='M')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng - o
-
- # Tick
- offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
- pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
- for delta in offsets:
- rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
- result = rng - delta
- expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
- tm.assert_index_equal(result, expected)
- rng -= delta
- tm.assert_index_equal(rng, expected)
-
- for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
- np.timedelta64(4, 'h'), timedelta(hours=23)]:
- rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng - o
-
- offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
- pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
- for delta in offsets:
- rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
- result = rng - delta
- expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
- tm.assert_index_equal(result, expected)
- rng -= delta
- tm.assert_index_equal(rng, expected)
-
- for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
- rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- result = rng + delta
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
- rng += delta
-
- # int
- rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
- result = rng - 1
- expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
- tm.assert_index_equal(result, expected)
- rng -= 1
- tm.assert_index_equal(rng, expected)
-
- def test_value_counts_unique(self):
- # GH 7735
- idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
- # create repeated values, 'n'th element is repeated by n+1 times
- idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H')
-
- exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00',
- '2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00',
- '2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00',
- '2011-01-01 09:00'], freq='H')
- expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
- tm.assert_series_equal(idx.value_counts(), expected)
-
- expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
- tm.assert_index_equal(idx.unique(), expected)
-
- idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
- '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H')
-
- exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
- expected = Series([3, 2], index=exp_idx)
- tm.assert_series_equal(idx.value_counts(), expected)
-
- exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H')
- expected = Series([3, 2, 1], index=exp_idx)
- tm.assert_series_equal(idx.value_counts(dropna=False), expected)
-
- tm.assert_index_equal(idx.unique(), exp_idx)
-
-
if __name__ == '__main__':
import nose
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index d6f734be56c32..51d767a291694 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2460,33 +2460,33 @@ def test_timedelta64(self):
o = Series([datetime(2012,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-0 days, 00:00:00.000150' in result)
+ self.assertTrue('-1 days +23:59:59.999850' in result)
# rounding?
o = Series([datetime(2012,1,1,1)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-0 days, 01:00:00' in result)
- self.assertTrue('1 days, 23:00:00' in result)
+ self.assertTrue('-1 days +23:00:00' in result)
+ self.assertTrue('1 days 23:00:00' in result)
o = Series([datetime(2012,1,1,1,1)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-0 days, 01:01:00' in result)
- self.assertTrue('1 days, 22:59:00' in result)
+ self.assertTrue('-1 days +22:59:00' in result)
+ self.assertTrue('1 days 22:59:00' in result)
o = Series([datetime(2012,1,1,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-0 days, 01:01:00.000150' in result)
- self.assertTrue('1 days, 22:58:59.999850' in result)
+ self.assertTrue('-1 days +22:58:59.999850' in result)
+ self.assertTrue('0 days 22:58:59.999850' in result)
# neg time
td = timedelta(minutes=5,seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
- self.assertTrue('-00:05:03' in result)
+ self.assertTrue('-1 days +23:54:57' in result)
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
@@ -2494,6 +2494,11 @@ def test_timedelta64(self):
result = y.to_string()
self.assertTrue('2012-01-01 23:59:59.999450' in result)
+ # no boxing of the actual elements
+ td = Series(pd.timedelta_range('1 days',periods=3))
+ result = td.to_string()
+ self.assertEqual(result,u("0 1 days\n1 2 days\n2 3 days"))
+
def test_mixed_datetime64(self):
df = DataFrame({'A': [1, 2],
'B': ['2012-01-01', '2012-01-02']})
@@ -2759,33 +2764,51 @@ def test_format(self):
class TestRepr_timedelta64(tm.TestCase):
- def test_legacy(self):
+
+ def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
- self.assertEqual(tslib.repr_timedelta64(delta_1d), "1 days, 00:00:00")
- self.assertEqual(tslib.repr_timedelta64(-delta_1d), "-1 days, 00:00:00")
- self.assertEqual(tslib.repr_timedelta64(delta_0d), "00:00:00")
- self.assertEqual(tslib.repr_timedelta64(delta_1s), "00:00:01")
- self.assertEqual(tslib.repr_timedelta64(delta_500ms), "00:00:00.500000")
- self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s), "1 days, 00:00:01")
- self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms), "1 days, 00:00:00.500000")
+ drepr = lambda x: x._repr_base()
+ self.assertEqual(drepr(delta_1d), "1 days")
+ self.assertEqual(drepr(-delta_1d), "-1 days")
+ self.assertEqual(drepr(delta_0d), "0 days")
+ self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
+ self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
+ self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
+ self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
- def test_short(self):
+ def test_even_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
- self.assertEqual(tslib.repr_timedelta64(delta_1d, format='short'), "1 days")
- self.assertEqual(tslib.repr_timedelta64(-delta_1d, format='short'), "-1 days")
- self.assertEqual(tslib.repr_timedelta64(delta_0d, format='short'), "00:00:00")
- self.assertEqual(tslib.repr_timedelta64(delta_1s, format='short'), "00:00:01")
- self.assertEqual(tslib.repr_timedelta64(delta_500ms, format='short'), "00:00:00.500000")
- self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s, format='short'), "1 days, 00:00:01")
- self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms, format='short'), "1 days, 00:00:00.500000")
+ drepr = lambda x: x._repr_base(format='even_day')
+ self.assertEqual(drepr(delta_1d), "1 days")
+ self.assertEqual(drepr(-delta_1d), "-1 days")
+ self.assertEqual(drepr(delta_0d), "0 days")
+ self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
+ self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
+ self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
+ self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
+
+ def test_sub_day(self):
+ delta_1d = pd.to_timedelta(1, unit='D')
+ delta_0d = pd.to_timedelta(0, unit='D')
+ delta_1s = pd.to_timedelta(1, unit='s')
+ delta_500ms = pd.to_timedelta(500, unit='ms')
+
+ drepr = lambda x: x._repr_base(format='sub_day')
+ self.assertEqual(drepr(delta_1d), "1 days")
+ self.assertEqual(drepr(-delta_1d), "-1 days")
+ self.assertEqual(drepr(delta_0d), "00:00:00")
+ self.assertEqual(drepr(delta_1s), "00:00:01")
+ self.assertEqual(drepr(delta_500ms), "00:00:00.500000")
+ self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
+ self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
@@ -2793,65 +2816,69 @@ def test_long(self):
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
- self.assertEqual(tslib.repr_timedelta64(delta_1d, format='long'), "1 days, 00:00:00")
- self.assertEqual(tslib.repr_timedelta64(-delta_1d, format='long'), "-1 days, 00:00:00")
- self.assertEqual(tslib.repr_timedelta64(delta_0d, format='long'), "0 days, 00:00:00")
- self.assertEqual(tslib.repr_timedelta64(delta_1s, format='long'), "0 days, 00:00:01")
- self.assertEqual(tslib.repr_timedelta64(delta_500ms, format='long'), "0 days, 00:00:00.500000")
- self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s, format='long'), "1 days, 00:00:01")
- self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms, format='long'), "1 days, 00:00:00.500000")
+ drepr = lambda x: x._repr_base(format='long')
+ self.assertEqual(drepr(delta_1d), "1 days 00:00:00")
+ self.assertEqual(drepr(-delta_1d), "-1 days +00:00:00")
+ self.assertEqual(drepr(delta_0d), "0 days 00:00:00")
+ self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
+ self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
+ self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
+ self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
+ def test_all(self):
+ delta_1d = pd.to_timedelta(1, unit='D')
+ delta_0d = pd.to_timedelta(0, unit='D')
+ delta_1ns = pd.to_timedelta(1, unit='ns')
-class TestTimedelta64Formatter(tm.TestCase):
- def test_mixed(self):
- x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
- y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
- result = fmt.Timedelta64Formatter(x + y).get_result()
- self.assertEqual(result[0].strip(), "0 days, 00:00:00")
- self.assertEqual(result[1].strip(), "1 days, 00:00:01")
+ drepr = lambda x: x._repr_base(format='all')
+ self.assertEqual(drepr(delta_1d), "1 days 00:00:00.000000000")
+ self.assertEqual(drepr(delta_0d), "0 days 00:00:00.000000000")
+ self.assertEqual(drepr(delta_1ns), "0 days 00:00:00.000000001")
- def test_mixed_neg(self):
- x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
- y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
- result = fmt.Timedelta64Formatter(-(x + y)).get_result()
- self.assertEqual(result[0].strip(), "0 days, 00:00:00")
- self.assertEqual(result[1].strip(), "-1 days, 00:00:01")
+class TestTimedelta64Formatter(tm.TestCase):
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
- result = fmt.Timedelta64Formatter(x).get_result()
+ result = fmt.Timedelta64Formatter(x,box=True).get_result()
+ self.assertEqual(result[0].strip(), "'0 days'")
+ self.assertEqual(result[1].strip(), "'1 days'")
+
+ result = fmt.Timedelta64Formatter(x[1:2],box=True).get_result()
+ self.assertEqual(result[0].strip(), "'1 days'")
+
+ result = fmt.Timedelta64Formatter(x,box=False).get_result()
self.assertEqual(result[0].strip(), "0 days")
self.assertEqual(result[1].strip(), "1 days")
- result = fmt.Timedelta64Formatter(x[1:2]).get_result()
+ result = fmt.Timedelta64Formatter(x[1:2],box=False).get_result()
self.assertEqual(result[0].strip(), "1 days")
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
- result = fmt.Timedelta64Formatter(-x).get_result()
- self.assertEqual(result[0].strip(), "0 days")
- self.assertEqual(result[1].strip(), "-1 days")
+ result = fmt.Timedelta64Formatter(-x,box=True).get_result()
+ self.assertEqual(result[0].strip(), "'0 days'")
+ self.assertEqual(result[1].strip(), "'-1 days'")
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
- result = fmt.Timedelta64Formatter(y).get_result()
- self.assertEqual(result[0].strip(), "00:00:00")
- self.assertEqual(result[1].strip(), "00:00:01")
+ result = fmt.Timedelta64Formatter(y,box=True).get_result()
+ self.assertEqual(result[0].strip(), "'00:00:00'")
+ self.assertEqual(result[1].strip(), "'00:00:01'")
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
- result = fmt.Timedelta64Formatter(-y).get_result()
- self.assertEqual(result[0].strip(), "00:00:00")
- self.assertEqual(result[1].strip(), "-00:00:01")
+ result = fmt.Timedelta64Formatter(-y,box=True).get_result()
+ self.assertEqual(result[0].strip(), "'00:00:00'")
+ self.assertEqual(result[1].strip(), "'-1 days +23:59:59'")
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
- result = fmt.Timedelta64Formatter(x).get_result()
- self.assertEqual(result[0].strip(), "0 days")
+ result = fmt.Timedelta64Formatter(x,box=True).get_result()
+ self.assertEqual(result[0].strip(), "'0 days'")
x = pd.to_timedelta(list(range(1)), unit='D')
- result = fmt.Timedelta64Formatter(x).get_result()
- self.assertEqual(result[0].strip(), "0 days")
+ result = fmt.Timedelta64Formatter(x,box=True).get_result()
+ self.assertEqual(result[0].strip(), "'0 days'")
class TestDatetime64Formatter(tm.TestCase):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a7de624842b2b..8245d1bd0759c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -32,7 +32,8 @@
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, notnull, isnull,
- MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv,
+ MultiIndex, DatetimeIndex, Timestamp, date_range,
+ read_csv, timedelta_range, Timedelta,
option_context)
import pandas as pd
from pandas.parser import CParserError
@@ -9515,6 +9516,18 @@ def test_diff(self):
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
+ def test_diff_timedelta(self):
+ # GH 4533
+ df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
+ Timestamp('20130101 9:02')],
+ value=[1.0,2.0]))
+
+ res = df.diff()
+ exp = DataFrame([[pd.NaT, np.nan],
+ [Timedelta('00:01:00'), 1]],
+ columns=['time', 'value'])
+ assert_frame_equal(res, exp)
+
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
@@ -12175,6 +12188,42 @@ def test_construction_with_mixed(self):
'timedelta64[ns]' : 1}).order()
assert_series_equal(result,expected)
+ def test_construction_with_conversions(self):
+
+ # convert from a numpy array of non-ns timedelta64
+ arr = np.array([1,2,3],dtype='timedelta64[s]')
+ s = Series(arr)
+ expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))
+ assert_series_equal(s,expected)
+
+ df = DataFrame(index=range(3))
+ df['A'] = arr
+ expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},
+ index=range(3))
+ assert_frame_equal(df,expected)
+
+ # convert from a numpy array of non-ns datetime64
+ #### note that creating a numpy datetime64 is in LOCAL time!!!!
+ #### seems to work for M8[D], but not for M8[s]
+
+ s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))
+ assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))
+ #s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
+ #assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))
+
+ expected = DataFrame({
+ 'dt1' : Timestamp('20130101'),
+ 'dt2' : date_range('20130101',periods=3),
+ #'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
+ },index=range(3))
+
+
+ df = DataFrame(index=range(3))
+ df['dt1'] = np.datetime64('2013-01-01')
+ df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')
+ #df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
+ assert_frame_equal(df, expected)
+
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 8d0b54f2ef0b4..295af483289e5 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -16,6 +16,7 @@
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
+from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
@@ -53,13 +54,13 @@ def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
- "cannot perform multiplication",
+ "cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
- "cannot perform multiplication",
+ "cannot perform __mul__",
lambda : 1 * idx)
- div_err = "cannot perform true division" if compat.PY3 else "cannot perform division"
+ div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
@@ -67,10 +68,10 @@ def test_numeric_compat(self):
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
- "cannot perform floor division",
+ "cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
- "cannot perform floor division",
+ "cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
@@ -1654,6 +1655,52 @@ def create_index(self):
def test_pickle_compat_construction(self):
pass
+class TestTimedeltaIndex(Base, tm.TestCase):
+ _holder = TimedeltaIndex
+ _multiprocess_can_split_ = True
+
+ def create_index(self):
+ return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
+
+ def test_numeric_compat(self):
+
+ idx = self._holder(np.arange(5,dtype='int64'))
+ didx = self._holder(np.arange(5,dtype='int64')**2
+ )
+ result = idx * 1
+ tm.assert_index_equal(result, idx)
+
+ result = 1 * idx
+ tm.assert_index_equal(result, idx)
+
+ result = idx / 1
+ tm.assert_index_equal(result, idx)
+
+ result = idx // 1
+ tm.assert_index_equal(result, idx)
+
+ result = idx * np.array(5,dtype='int64')
+ tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
+
+ result = idx * np.arange(5,dtype='int64')
+ tm.assert_index_equal(result, didx)
+
+ result = idx * Series(np.arange(5,dtype='int64'))
+ tm.assert_index_equal(result, didx)
+
+ result = idx * Series(np.arange(5,dtype='float64')+0.1)
+ tm.assert_index_equal(result,
+ Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
+
+
+ # invalid
+ self.assertRaises(TypeError, lambda : idx * idx)
+ self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
+ self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
+
+ def test_pickle_compat_construction(self):
+ pass
+
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 3e8a5fecbb579..509ef4925bb66 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -118,11 +118,32 @@ def setUp(self):
def check_results(self, targ, res, axis):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
- if axis != 0 and hasattr(targ, 'shape') and targ.ndim:
- res = np.split(res, [targ.shape[0]], axis=0)[0]
+
+ # timedeltas are a beast here
+ def _coerce_tds(targ, res):
+ if targ.dtype == 'm8[ns]':
+ if len(targ) == 1:
+ targ = targ[0].item()
+ res = res.item()
+ else:
+ targ = targ.view('i8')
+ return targ, res
+
+ try:
+ if axis != 0 and hasattr(targ, 'shape') and targ.ndim:
+ res = np.split(res, [targ.shape[0]], axis=0)[0]
+ except:
+ targ, res = _coerce_tds(targ, res)
+
try:
tm.assert_almost_equal(targ, res)
except:
+
+ if targ.dtype == 'm8[ns]':
+ targ, res = _coerce_tds(targ, res)
+ tm.assert_almost_equal(targ, res)
+ return
+
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
@@ -208,7 +229,7 @@ def check_fun(self, testfunc, targfunc,
def check_funs(self, testfunc, targfunc,
allow_complex=True, allow_all_nan=True, allow_str=True,
- allow_date=True, allow_obj=True,
+ allow_date=True, allow_tdelta=True, allow_obj=True,
**kwargs):
self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
@@ -244,6 +265,8 @@ def check_funs(self, testfunc, targfunc,
else:
self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
objs += [self.arr_date.astype('O')]
+
+ if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
@@ -264,12 +287,12 @@ def check_funs(self, testfunc, targfunc,
def check_funs_ddof(self, testfunc, targfunc,
allow_complex=True, allow_all_nan=True, allow_str=True,
- allow_date=True, allow_obj=True,):
+ allow_date=False, allow_tdelta=False, allow_obj=True,):
for ddof in range(3):
try:
self.check_funs(self, testfunc, targfunc,
allow_complex, allow_all_nan, allow_str,
- allow_date, allow_obj,
+ allow_date, allow_tdelta, allow_obj,
ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof,)
@@ -284,34 +307,35 @@ def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
def test_nanany(self):
self.check_funs(nanops.nanany, np.any,
- allow_all_nan=False, allow_str=False, allow_date=False)
+ allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all,
- allow_all_nan=False, allow_str=False, allow_date=False)
+ allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum,
- allow_str=False, allow_date=False)
+ allow_str=False, allow_date=False, allow_tdelta=True)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean,
allow_complex=False, allow_obj=False,
- allow_str=False, allow_date=False)
+ allow_str=False, allow_date=False, allow_tdelta=True)
def test_nanmedian(self):
self.check_funs(nanops.nanmedian, np.median,
allow_complex=False, allow_str=False, allow_date=False,
+ allow_tdelta=True,
allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var,
- allow_complex=False, allow_date=False)
+ allow_complex=False, allow_date=False, allow_tdelta=False)
def test_nansem(self):
tm.skip_if_no_package('scipy.stats')
self.check_funs_ddof(nanops.nansem, np.var,
- allow_complex=False, allow_date=False)
+ allow_complex=False, allow_date=False, allow_tdelta=False)
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
@@ -343,13 +367,16 @@ def _argminmax_wrap(self, value, axis=None, func=None):
def test_nanargmax(self):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func,
- allow_str=False, allow_obj=False)
+ allow_str=False, allow_obj=False,
+ allow_date=True,
+ allow_tdelta=True)
def test_nanargmin(self):
func = partial(self._argminmax_wrap, func=np.argmin)
if tm.sys.version_info[0:2] == (2, 6):
self.check_funs(nanops.nanargmin, func,
- allow_date=False,
+ allow_date=True,
+ allow_tdelta=True,
allow_str=False, allow_obj=False)
else:
self.check_funs(nanops.nanargmin, func,
@@ -372,7 +399,7 @@ def test_nanskew(self):
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
self.check_funs(nanops.nanskew, func,
- allow_complex=False, allow_str=False, allow_date=False)
+ allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy.stats')
@@ -380,11 +407,11 @@ def test_nankurt(self):
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
self.check_funs(nanops.nankurt, func,
- allow_complex=False, allow_str=False, allow_date=False)
+ allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod,
- allow_str=False, allow_date=False)
+ allow_str=False, allow_date=False, allow_tdelta=False)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d,
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 02a8f79e5a8c1..0b76d6247060d 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -16,10 +16,12 @@
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
- date_range, period_range)
+ date_range, period_range, timedelta_range)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
+from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
+from pandas.tseries.tdi import Timedelta, TimedeltaIndex
import pandas.core.common as com
import pandas.core.config as cf
import pandas.lib as lib
@@ -76,18 +78,31 @@ def test_dt_namespace_accessor(self):
# GH 7207
# test .dt namespace accessor
- ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter']
+ ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq']
ok_for_period = ok_for_base + ['qyear']
ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
- 'is_quarter_end', 'is_year_start', 'is_year_end']
+ 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']
+ ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert']
+ ok_for_td = ['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds']
+ ok_for_td_methods = ['components','to_pytimedelta']
def get_expected(s, name):
result = getattr(Index(s.values),prop)
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
+ elif not com.is_list_like(result):
+ return result
return Series(result,index=s.index)
+ def compare(s, name):
+ a = getattr(s.dt,prop)
+ b = get_expected(s,prop)
+ if not (com.is_list_like(a) and com.is_list_like(b)):
+ self.assertEqual(a,b)
+ else:
+ tm.assert_series_equal(a,b)
+
# invalids
for s in [Series(np.arange(5)),
Series(list('abcde')),
@@ -98,9 +113,51 @@ def get_expected(s, name):
for s in [Series(date_range('20130101',periods=5)),
Series(date_range('20130101',periods=5,freq='s')),
Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:
-
for prop in ok_for_dt:
- tm.assert_series_equal(getattr(s.dt,prop),get_expected(s,prop))
+
+ # we test freq below
+ if prop != 'freq':
+ compare(s, prop)
+
+ for prop in ok_for_dt_methods:
+ getattr(s.dt,prop)
+
+ result = s.dt.to_pydatetime()
+ self.assertIsInstance(result,np.ndarray)
+ self.assertTrue(result.dtype == object)
+
+ result = s.dt.tz_localize('US/Eastern')
+ expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)
+ tm.assert_series_equal(result, expected)
+
+ tz_result = result.dt.tz
+ self.assertEqual(str(tz_result), 'US/Eastern')
+ freq_result = s.dt.freq
+ self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)
+
+ # timedeltaindex
+ for s in [Series(timedelta_range('1 day',periods=5)),
+ Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),
+ Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:
+ for prop in ok_for_td:
+
+ # we test freq below
+ if prop != 'freq':
+ compare(s, prop)
+
+ for prop in ok_for_td_methods:
+ getattr(s.dt,prop)
+
+ result = s.dt.components
+ self.assertIsInstance(result,DataFrame)
+ tm.assert_index_equal(result.index,s.index)
+
+ result = s.dt.to_pytimedelta()
+ self.assertIsInstance(result,np.ndarray)
+ self.assertTrue(result.dtype == object)
+
+ freq_result = s.dt.freq
+ self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)
# both
index = date_range('20130101',periods=3,freq='D')
@@ -113,7 +170,13 @@ def get_expected(s, name):
for s in [Series(period_range('20130101',periods=5,freq='D'))]:
for prop in ok_for_period:
- tm.assert_series_equal(getattr(s.dt,prop),get_expected(s,prop))
+
+ # we test freq below
+ if prop != 'freq':
+ compare(s, prop)
+
+ freq_result = s.dt.freq
+ self.assertEqual(freq_result, PeriodIndex(s.values).freq)
# test limited display api
def get_dir(s):
@@ -122,7 +185,7 @@ def get_dir(s):
s = Series(date_range('20130101',periods=5,freq='D'))
results = get_dir(s)
- tm.assert_almost_equal(results,list(sorted(set(ok_for_dt))))
+ tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101',periods=5,freq='D').asobject)
results = get_dir(s)
@@ -2216,7 +2279,7 @@ def testit():
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
- if not ('max' in name or 'min' in name):
+ if name not in ['max','min']:
ds = Series(date_range('1/1/2001', periods=10))
self.assertRaises(TypeError, f, ds)
@@ -2820,6 +2883,16 @@ def test_timedelta64_conversions(self):
expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)
result = np.timedelta64(m,unit) / s1
+ # astype
+ s = Series(date_range('20130101',periods=3))
+ result = s.astype(object)
+ self.assertIsInstance(result.iloc[0],datetime)
+ self.assertTrue(result.dtype == np.object_)
+
+ result = s1.astype(object)
+ self.assertIsInstance(result.iloc[0],timedelta)
+ self.assertTrue(result.dtype == np.object_)
+
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'),
@@ -2846,6 +2919,22 @@ def timedelta64(*args):
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs))
+ def test_timedelta_assignment(self):
+ # GH 8209
+ s = Series([])
+ s.loc['B'] = timedelta(1)
+ tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))
+
+ s = s.reindex(s.index.insert(0, 'A'))
+ tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))
+
+ result = s.fillna(timedelta(1))
+ expected = Series(Timedelta('1 days'),index=['A','B'])
+ tm.assert_series_equal(result, expected)
+
+ s.loc['A'] = timedelta(1)
+ tm.assert_series_equal(s, expected)
+
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
@@ -2955,12 +3044,37 @@ def test_timedelta64_functions(self):
# max/min
result = td.max()
- expected = Series([timedelta(2)], dtype='timedelta64[ns]')
- assert_series_equal(result, expected)
+ expected = Timedelta('2 days')
+ self.assertEqual(result, expected)
result = td.min()
- expected = Series([timedelta(1)], dtype='timedelta64[ns]')
- assert_series_equal(result, expected)
+ expected = Timedelta('1 days')
+ self.assertEqual(result, expected)
+
+ def test_ops_consistency_on_empty(self):
+
+ # GH 7869
+ # consistency on empty
+
+ # float
+ result = Series(dtype=float).sum()
+ self.assertEqual(result,0)
+
+ result = Series(dtype=float).mean()
+ self.assertTrue(isnull(result))
+
+ result = Series(dtype=float).median()
+ self.assertTrue(isnull(result))
+
+ # timedelta64[ns]
+ result = Series(dtype='m8[ns]').sum()
+ self.assertEqual(result, Timedelta(0))
+
+ result = Series(dtype='m8[ns]').mean()
+ self.assertTrue(result is pd.NaT)
+
+ result = Series(dtype='m8[ns]').median()
+ self.assertTrue(result is pd.NaT)
def test_timedelta_fillna(self):
#GH 3371
@@ -3212,19 +3326,19 @@ def test_timedelta64_nan(self):
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
- self.assertEqual(td1[0].view('i8'), tslib.iNaT)
+ self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
- self.assertEqual(td1[1].view('i8'), tslib.iNaT)
+ self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
- self.assertEqual(td1[2].view('i8'), tslib.iNaT)
+ self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py
index c2cc3723802fc..7c47bd9a232a9 100644
--- a/pandas/tseries/api.py
+++ b/pandas/tseries/api.py
@@ -5,6 +5,7 @@
from pandas.tseries.index import DatetimeIndex, date_range, bdate_range
from pandas.tseries.frequencies import infer_freq
+from pandas.tseries.tdi import Timedelta, TimedeltaIndex, timedelta_range
from pandas.tseries.period import Period, PeriodIndex, period_range, pnow
from pandas.tseries.resample import TimeGrouper
from pandas.tseries.timedeltas import to_timedelta
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
new file mode 100644
index 0000000000000..1d9a062c624f7
--- /dev/null
+++ b/pandas/tseries/base.py
@@ -0,0 +1,469 @@
+"""
+Base and utility classes for tseries type pandas objects.
+"""
+
+
+from datetime import datetime, time, timedelta
+
+from pandas import compat
+import numpy as np
+from pandas.core import common as com
+import pandas.tslib as tslib
+import pandas.lib as lib
+from pandas.core.index import Index
+from pandas.util.decorators import Appender, cache_readonly
+from pandas.tseries.frequencies import (
+ infer_freq, to_offset, get_period_alias,
+ Resolution)
+import pandas.algos as _algos
+
+class DatetimeIndexOpsMixin(object):
+ """ common ops mixin to support a unified inteface datetimelike Index """
+
+ def __iter__(self):
+ return (self._box_func(v) for v in self.asi8)
+
+ @staticmethod
+ def _join_i8_wrapper(joinf, dtype, with_indexers=True):
+ """ create the join wrapper methods """
+
+ @staticmethod
+ def wrapper(left, right):
+ if isinstance(left, (np.ndarray, com.ABCIndex, com.ABCSeries)):
+ left = left.view('i8')
+ if isinstance(right, (np.ndarray, com.ABCIndex, com.ABCSeries)):
+ right = right.view('i8')
+ results = joinf(left, right)
+ if with_indexers:
+ join_index, left_indexer, right_indexer = results
+ join_index = join_index.view(dtype)
+ return join_index, left_indexer, right_indexer
+ return results
+
+ return wrapper
+
+ @property
+ def _box_func(self):
+ """
+ box function to get object from internal representation
+ """
+ raise NotImplementedError
+
+ def _box_values(self, values):
+ """
+ apply box func to passed values
+ """
+ return lib.map_infer(values, self._box_func)
+
+ def groupby(self, f):
+ objs = self.asobject.values
+ return _algos.groupby_object(objs, f)
+
+ def _format_with_header(self, header, **kwargs):
+ return header + self._format_native_types(**kwargs)
+
+ def __contains__(self, key):
+ try:
+ res = self.get_loc(key)
+ return np.isscalar(res) or type(res) == slice
+ except (KeyError, TypeError):
+ return False
+
+ @cache_readonly
+ def inferred_freq(self):
+ try:
+ return infer_freq(self)
+ except ValueError:
+ return None
+
+ # Try to run function on index first, and then on elements of index
+ # Especially important for group-by functionality
+ def map(self, f):
+ try:
+ result = f(self)
+ if not isinstance(result, (np.ndarray, Index)):
+ raise TypeError
+ return result
+ except Exception:
+ return _algos.arrmap_object(self.asobject.values, f)
+
+ def order(self, return_indexer=False, ascending=True):
+ """
+ Return sorted copy of Index
+ """
+ if return_indexer:
+ _as = self.argsort()
+ if not ascending:
+ _as = _as[::-1]
+ sorted_index = self.take(_as)
+ return sorted_index, _as
+ else:
+ sorted_values = np.sort(self.values)
+ if not ascending:
+ sorted_values = sorted_values[::-1]
+ attribs = self._get_attributes_dict()
+ attribs['freq'] = None
+ return self._simple_new(sorted_values, **attribs)
+
+ def take(self, indices, axis=0):
+ """
+ Analogous to ndarray.take
+ """
+ maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
+ if isinstance(maybe_slice, slice):
+ return self[maybe_slice]
+ return super(DatetimeIndexOpsMixin, self).take(indices, axis)
+
+ def slice_locs(self, start=None, end=None):
+ """
+ Index.slice_locs, customized to handle partial ISO-8601 string slicing
+ """
+ if isinstance(start, compat.string_types) or isinstance(end, compat.string_types):
+
+ if self.is_monotonic:
+ try:
+ if start:
+ start_loc = self._get_string_slice(start).start
+ else:
+ start_loc = 0
+
+ if end:
+ end_loc = self._get_string_slice(end).stop
+ else:
+ end_loc = len(self)
+
+ return start_loc, end_loc
+ except KeyError:
+ pass
+
+ else:
+ # can't use a slice indexer because we are not sorted!
+ # so create an indexer directly
+ try:
+ if start:
+ start_loc = self._get_string_slice(start,
+ use_rhs=False)
+ else:
+ start_loc = np.arange(len(self))
+
+ if end:
+ end_loc = self._get_string_slice(end, use_lhs=False)
+ else:
+ end_loc = np.arange(len(self))
+
+ return start_loc, end_loc
+ except KeyError:
+ pass
+
+ if isinstance(start, time) or isinstance(end, time):
+ raise KeyError('Cannot use slice_locs with time slice keys')
+
+ return Index.slice_locs(self, start, end)
+
+ def get_duplicates(self):
+ values = Index.get_duplicates(self)
+ return self._simple_new(values)
+
+ @cache_readonly
+ def hasnans(self):
+ """ return if I have any nans; enables various perf speedups """
+ return (self.asi8 == tslib.iNaT).any()
+
+ @property
+ def asobject(self):
+ from pandas.core.index import Index
+ return Index(self._box_values(self.asi8), name=self.name, dtype=object)
+
+ def tolist(self):
+ """
+ return a list of the underlying data
+ """
+ return list(self.asobject)
+
+ def min(self, axis=None):
+ """
+ return the minimum value of the Index
+
+ See also
+ --------
+ numpy.ndarray.min
+ """
+ try:
+ i8 = self.asi8
+
+ # quick check
+ if len(i8) and self.is_monotonic:
+ if i8[0] != tslib.iNaT:
+ return self._box_func(i8[0])
+
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ min_stamp = self[~mask].asi8.min()
+ else:
+ min_stamp = i8.min()
+ return self._box_func(min_stamp)
+ except ValueError:
+ return self._na_value
+
+ def argmin(self, axis=None):
+ """
+ return a ndarray of the minimum argument indexer
+
+ See also
+ --------
+ numpy.ndarray.argmin
+ """
+
+ i8 = self.asi8
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ if mask.all():
+ return -1
+ i8 = i8.copy()
+ i8[mask] = np.iinfo('int64').max
+ return i8.argmin()
+
+ def max(self, axis=None):
+ """
+ return the maximum value of the Index
+
+ See also
+ --------
+ numpy.ndarray.max
+ """
+ try:
+ i8 = self.asi8
+
+ # quick check
+ if len(i8) and self.is_monotonic:
+ if i8[-1] != tslib.iNaT:
+ return self._box_func(i8[-1])
+
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ max_stamp = self[~mask].asi8.max()
+ else:
+ max_stamp = i8.max()
+ return self._box_func(max_stamp)
+ except ValueError:
+ return self._na_value
+
+ def argmax(self, axis=None):
+ """
+ return a ndarray of the maximum argument indexer
+
+ See also
+ --------
+ numpy.ndarray.argmax
+ """
+
+ i8 = self.asi8
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ if mask.all():
+ return -1
+ i8 = i8.copy()
+ i8[mask] = 0
+ return i8.argmax()
+
+ @property
+ def _formatter_func(self):
+ """
+ Format function to convert value to representation
+ """
+ return str
+
+ def _format_footer(self):
+ raise NotImplementedError
+
+ def __unicode__(self):
+ formatter = self._formatter_func
+ summary = str(self.__class__) + '\n'
+
+ n = len(self)
+ if n == 0:
+ pass
+ elif n == 1:
+ first = formatter(self[0])
+ summary += '[%s]\n' % first
+ elif n == 2:
+ first = formatter(self[0])
+ last = formatter(self[-1])
+ summary += '[%s, %s]\n' % (first, last)
+ else:
+ first = formatter(self[0])
+ last = formatter(self[-1])
+ summary += '[%s, ..., %s]\n' % (first, last)
+
+ summary += self._format_footer()
+ return summary
+
+ @cache_readonly
+ def _resolution(self):
+ from pandas.tseries.frequencies import Resolution
+ return Resolution.get_reso_from_freq(self.freqstr)
+
+ @cache_readonly
+ def resolution(self):
+ """
+ Returns day, hour, minute, second, millisecond or microsecond
+ """
+ from pandas.tseries.frequencies import get_reso_string
+ return get_reso_string(self._resolution)
+
+ def _add_datelike(self, other):
+ return NotImplemented
+
+ def _sub_datelike(self, other):
+ return NotImplemented
+
+ def __add__(self, other):
+ from pandas.core.index import Index
+ from pandas.tseries.tdi import TimedeltaIndex
+ from pandas.tseries.offsets import DateOffset
+ if isinstance(other, TimedeltaIndex):
+ return self._add_delta(other)
+ elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
+ if hasattr(other,'_add_delta'):
+ return other._add_delta(self)
+ raise TypeError("cannot perform a numeric operation with a TimedeltaIndex and {typ}".format(typ=type(other)))
+ elif isinstance(other, Index):
+ return self.union(other)
+ elif isinstance(other, (DateOffset, timedelta, np.timedelta64, tslib.Timedelta)):
+ return self._add_delta(other)
+ elif com.is_integer(other):
+ return self.shift(other)
+ elif isinstance(other, (tslib.Timestamp, datetime)):
+ return self._add_datelike(other)
+ else: # pragma: no cover
+ return NotImplemented
+
+ def __sub__(self, other):
+ from pandas.core.index import Index
+ from pandas.tseries.tdi import TimedeltaIndex
+ from pandas.tseries.offsets import DateOffset
+ if isinstance(other, TimedeltaIndex):
+ return self._add_delta(-other)
+ elif isinstance(other, Index):
+ return self.diff(other)
+ elif isinstance(other, (DateOffset, timedelta, np.timedelta64, tslib.Timedelta)):
+ return self._add_delta(-other)
+ elif com.is_integer(other):
+ return self.shift(-other)
+ elif isinstance(other, (tslib.Timestamp, datetime)):
+ return self._sub_datelike(other)
+ else: # pragma: no cover
+ return NotImplemented
+
+ __iadd__ = __add__
+ __isub__ = __sub__
+
+ def _add_delta(self, other):
+ return NotImplemented
+
+ def _add_delta_td(self, other):
+ # add a delta of a timedeltalike
+ # return the i8 result view
+
+ inc = tslib._delta_to_nanoseconds(other)
+ mask = self.asi8 == tslib.iNaT
+ new_values = (self.asi8 + inc).view(self.dtype)
+ new_values[mask] = tslib.iNaT
+ return new_values.view(self.dtype)
+
+ def _add_delta_tdi(self, other):
+ # add a delta of a TimedeltaIndex
+ # return the i8 result view
+
+ # delta operation
+ if not len(self) == len(other):
+ raise ValueError("cannot add indices of unequal length")
+
+ self_i8 = self.asi8
+ other_i8 = other.asi8
+ mask = (self_i8 == tslib.iNaT) | (other_i8 == tslib.iNaT)
+ new_values = self_i8 + other_i8
+ new_values[mask] = tslib.iNaT
+ return new_values.view(self.dtype)
+
+ def isin(self, values):
+ """
+ Compute boolean array of whether each index value is found in the
+ passed set of values
+
+ Parameters
+ ----------
+ values : set or sequence of values
+
+ Returns
+ -------
+ is_contained : ndarray (boolean dtype)
+ """
+ if not isinstance(values, type(self)):
+ try:
+ values = type(self)(values)
+ except ValueError:
+ return self.asobject.isin(values)
+
+ value_set = set(values.asi8)
+ return lib.ismember(self.asi8, value_set)
+
+ def shift(self, n, freq=None):
+ """
+ Specialized shift which produces a DatetimeIndex
+
+ Parameters
+ ----------
+ n : int
+ Periods to shift by
+ freq : DateOffset or timedelta-like, optional
+
+ Returns
+ -------
+ shifted : DatetimeIndex
+ """
+ if freq is not None and freq != self.freq:
+ if isinstance(freq, compat.string_types):
+ freq = to_offset(freq)
+ result = Index.shift(self, n, freq)
+
+ if hasattr(self,'tz'):
+ result.tz = self.tz
+
+ return result
+
+ if n == 0:
+ # immutable so OK
+ return self
+
+ if self.freq is None:
+ raise ValueError("Cannot shift with no freq")
+
+ start = self[0] + n * self.freq
+ end = self[-1] + n * self.freq
+ attribs = self._get_attributes_dict()
+ attribs['start'] = start
+ attribs['end'] = end
+ return type(self)(**attribs)
+
+ def unique(self):
+ """
+ Index.unique with handling for DatetimeIndex/PeriodIndex metadata
+
+ Returns
+ -------
+ result : DatetimeIndex or PeriodIndex
+ """
+ from pandas.core.index import Int64Index
+ result = Int64Index.unique(self)
+ return self._simple_new(result, name=self.name, freq=self.freq,
+ tz=getattr(self, 'tz', None))
+
+ def repeat(self, repeats, axis=None):
+ """
+ Analogous to ndarray.repeat
+ """
+ return self._simple_new(self.values.repeat(repeats),
+ name=self.name)
+
+
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index d3b86d73dca3a..227af42f07411 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -3,7 +3,7 @@
import numpy as np
from pandas.core.base import PandasDelegate
from pandas.core import common as com
-from pandas import Series, DatetimeIndex, PeriodIndex
+from pandas import Series, DatetimeIndex, PeriodIndex, TimedeltaIndex
from pandas import lib, tslib
def is_datetimelike(data):
@@ -17,7 +17,8 @@ def is_datetimelike(data):
def maybe_to_datetimelike(data, copy=False):
"""
- return a DelegatedClass of a Series that is datetimelike (e.g. datetime64[ns] dtype or a Series of Periods)
+ return a DelegatedClass of a Series that is datetimelike
+ (e.g. datetime64[ns],timedelta64[ns] dtype or a Series of Periods)
raise TypeError if this is not possible.
Parameters
@@ -37,10 +38,14 @@ def maybe_to_datetimelike(data, copy=False):
index = data.index
if issubclass(data.dtype.type, np.datetime64):
- return DatetimeProperties(DatetimeIndex(data, copy=copy), index)
+ return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index)
+ elif issubclass(data.dtype.type, np.timedelta64):
+ return TimedeltaProperties(TimedeltaIndex(data, copy=copy, freq='infer'), index)
else:
if com.is_period_arraylike(data):
return PeriodProperties(PeriodIndex(data, copy=copy), index)
+ if com.is_datetime_arraylike(data):
+ return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index)
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
@@ -57,6 +62,8 @@ def _delegate_property_get(self, name):
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
+ elif not com.is_list_like(result):
+ return result
# return the result as a Series, which is by definition a copy
result = Series(result, index=self.index)
@@ -71,6 +78,21 @@ def _delegate_property_set(self, name, value, *args, **kwargs):
raise ValueError("modifications to a property of a datetimelike object are not "
"supported. Change values on the original.")
+ def _delegate_method(self, name, *args, **kwargs):
+ method = getattr(self.values, name)
+ result = method(*args, **kwargs)
+
+ if not com.is_list_like(result):
+ return result
+
+ result = Series(result, index=self.index)
+
+ # setting this object will show a SettingWithCopyWarning/Error
+ result.is_copy = ("modifications to a method of a datetimelike object are not "
+ "supported and are discarded. Change values on the original.")
+
+ return result
+
class DatetimeProperties(Properties):
"""
@@ -86,9 +108,42 @@ class DatetimeProperties(Properties):
Raises TypeError if the Series does not contain datetimelike values.
"""
+ def to_pydatetime(self):
+ return self.values.to_pydatetime()
+
DatetimeProperties._add_delegate_accessors(delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_ops,
typ='property')
+DatetimeProperties._add_delegate_accessors(delegate=DatetimeIndex,
+ accessors=["to_period","tz_localize","tz_convert"],
+ typ='method')
+
+class TimedeltaProperties(Properties):
+ """
+ Accessor object for datetimelike properties of the Series values.
+
+ Examples
+ --------
+ >>> s.dt.hours
+ >>> s.dt.seconds
+
+ Returns a Series indexed like the original Series.
+ Raises TypeError if the Series does not contain datetimelike values.
+ """
+
+ def to_pytimedelta(self):
+ return self.values.to_pytimedelta()
+
+ @property
+ def components(self):
+ return self.values.components
+
+TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex,
+ accessors=TimedeltaIndex._datetimelike_ops,
+ typ='property')
+TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex,
+ accessors=["to_pytimedelta"],
+ typ='method')
class PeriodProperties(Properties):
"""
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 873d24530d1d9..7cd286129e936 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -661,13 +661,17 @@ def infer_freq(index, warn=True):
if isinstance(index, com.ABCSeries):
values = index.values
- if not (com.is_datetime64_dtype(index.values) or values.dtype == object):
+ if not (com.is_datetime64_dtype(index.values) or com.is_timedelta64_dtype(index.values) or values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype on a Series of {0}".format(index.dtype))
index = values
if com.is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
+ elif isinstance(index, pd.TimedeltaIndex):
+ inferer = _TimedeltaFrequencyInferer(index, warn=warn)
+ return inferer.get_freq()
+
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
@@ -694,8 +698,9 @@ def __init__(self, index, warn=True):
self.index = index
self.values = np.asarray(index).view('i8')
- if index.tz is not None:
- self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
+ if hasattr(index,'tz'):
+ if index.tz is not None:
+ self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
self.warn = warn
@@ -892,6 +897,18 @@ def _get_wom_rule(self):
import pandas.core.algorithms as algos
+class _TimedeltaFrequencyInferer(_FrequencyInferer):
+
+ def _infer_daily_rule(self):
+ if self.is_unique:
+ days = self.deltas[0] / _ONE_DAY
+ if days % 7 == 0:
+ # Weekly
+ alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
+ return _maybe_add_count('W-%s' % alias, days / 7)
+ else:
+ return _maybe_add_count('D', days)
+
def _maybe_add_count(base, count):
if count > 1:
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index e2cb8216bb270..45e851afb49e0 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -15,9 +15,9 @@
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import (
- infer_freq, to_offset, get_period_alias,
+ to_offset, get_period_alias,
Resolution)
-from pandas.core.base import DatetimeIndexOpsMixin
+from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly, deprecate_kwarg
@@ -58,22 +58,6 @@ def f(self):
return property(f)
-def _join_i8_wrapper(joinf, with_indexers=True):
- @staticmethod
- def wrapper(left, right):
- if isinstance(left, (np.ndarray, Index, ABCSeries)):
- left = left.view('i8')
- if isinstance(right, (np.ndarray, Index, ABCSeries)):
- right = right.view('i8')
- results = joinf(left, right)
- if with_indexers:
- join_index, left_indexer, right_indexer = results
- join_index = join_index.view('M8[ns]')
- return join_index, left_indexer, right_indexer
- return results
- return wrapper
-
-
def _dt_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
@@ -162,6 +146,10 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
_typ = 'datetimeindex'
_join_precedence = 10
+
+ def _join_i8_wrapper(joinf, **kwargs):
+ return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]', **kwargs)
+
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
@@ -176,9 +164,6 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
- # structured array cache for datetime fields
- _sarr_cache = None
-
_engine_type = _index.DatetimeEngine
tz = None
@@ -188,7 +173,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter',
'date','time','microsecond','nanosecond','is_month_start','is_month_end',
- 'is_quarter_start','is_quarter_end','is_year_start','is_year_end']
+ 'is_quarter_start','is_quarter_end','is_year_start','is_year_end','tz','freq']
_is_numeric_dtype = False
@@ -301,6 +286,8 @@ def __new__(cls, data=None,
# make sure that we have a index/ndarray like (and not a Series)
if isinstance(subarr, ABCSeries):
subarr = subarr.values
+ if subarr.dtype == np.object_:
+ subarr = tools.to_datetime(subarr, box=False)
except ValueError:
# tz aware
@@ -492,7 +479,7 @@ def _local_timestamps(self):
return result.take(reverse)
@classmethod
- def _simple_new(cls, values, name=None, freq=None, tz=None):
+ def _simple_new(cls, values, name=None, freq=None, tz=None, **kwargs):
if not getattr(values,'dtype',None):
values = np.array(values,copy=False)
if values.dtype != _NS_DTYPE:
@@ -628,15 +615,24 @@ def __setstate__(self, state):
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
+ def _sub_datelike(self, other):
+ # subtract a datetime from myself, yielding a TimedeltaIndex
+
+ from pandas import TimedeltaIndex
+ other = Timestamp(other)
+ i8 = self.asi8
+ result = i8 - other.value
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ result[mask] = tslib.iNaT
+ return TimedeltaIndex(result,name=self.name,copy=False)
+
def _add_delta(self, delta):
- if isinstance(delta, (Tick, timedelta)):
- inc = offsets._delta_to_nanoseconds(delta)
- mask = self.asi8 == tslib.iNaT
- new_values = (self.asi8 + inc).view(_NS_DTYPE)
- new_values[mask] = tslib.iNaT
- new_values = new_values.view(_NS_DTYPE)
- elif isinstance(delta, np.timedelta64):
- new_values = self.to_series() + delta
+ from pandas import TimedeltaIndex
+ if isinstance(delta, (Tick, timedelta, np.timedelta64)):
+ new_values = self._add_delta_td(delta)
+ elif isinstance(delta, TimedeltaIndex):
+ new_values = self._add_delta_tdi(delta)
else:
new_values = self.astype('O') + delta
tz = 'UTC' if self.tz is not None else None
@@ -646,16 +642,6 @@ def _add_delta(self, delta):
result = result.tz_convert(self.tz)
return result
- def __contains__(self, key):
- try:
- res = self.get_loc(key)
- return np.isscalar(res) or type(res) == slice
- except (KeyError, TypeError):
- return False
-
- def _format_with_header(self, header, **kwargs):
- return header + self._format_native_types(**kwargs)
-
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
data = self.asobject
@@ -665,35 +651,9 @@ def _format_native_types(self, na_rep=u('NaT'),
date_format=date_format,
justify='all').get_result()
- def isin(self, values):
- """
- Compute boolean array of whether each index value is found in the
- passed set of values
-
- Parameters
- ----------
- values : set or sequence of values
-
- Returns
- -------
- is_contained : ndarray (boolean dtype)
- """
- if not isinstance(values, DatetimeIndex):
- try:
- values = DatetimeIndex(values)
- except ValueError:
- return self.asobject.isin(values)
-
- value_set = set(values.asi8)
- return lib.ismember(self.asi8, value_set)
-
def to_datetime(self, dayfirst=False):
return self.copy()
- def groupby(self, f):
- objs = self.asobject.values
- return _algos.groupby_object(objs, f)
-
def summary(self, name=None):
if len(self) > 0:
index_summary = ', %s to %s' % (com.pprint_thing(self[0]),
@@ -710,9 +670,9 @@ def summary(self, name=None):
return result
- def get_duplicates(self):
- values = Index.get_duplicates(self)
- return DatetimeIndex(values)
+ def _format_footer(self):
+ tagline = 'Length: %d, Freq: %s, Timezone: %s'
+ return tagline % (len(self), self.freqstr, self.tz)
def astype(self, dtype):
dtype = np.dtype(dtype)
@@ -755,10 +715,15 @@ def to_series(self, keep_tz=False):
-------
Series
"""
- return super(DatetimeIndex, self).to_series(keep_tz=keep_tz)
+ from pandas import Series
+ return Series(self._to_embed(keep_tz), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
- """ return an array repr of this object, potentially casting to object """
+ """
+ return an array repr of this object, potentially casting to object
+
+ This is for internal compat
+ """
if keep_tz and self.tz is not None and str(self.tz) != 'UTC':
return self.asobject.values
return self.values
@@ -790,23 +755,6 @@ def to_period(self, freq=None):
return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
- def order(self, return_indexer=False, ascending=True):
- """
- Return sorted copy of Index
- """
- if return_indexer:
- _as = self.argsort()
- if not ascending:
- _as = _as[::-1]
- sorted_index = self.take(_as)
- return sorted_index, _as
- else:
- sorted_values = np.sort(self.values)
- if not ascending:
- sorted_values = sorted_values[::-1]
- return self._simple_new(sorted_values, self.name, None,
- self.tz)
-
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
@@ -831,56 +779,6 @@ def snap(self, freq='S'):
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
- def shift(self, n, freq=None):
- """
- Specialized shift which produces a DatetimeIndex
-
- Parameters
- ----------
- n : int
- Periods to shift by
- freq : DateOffset or timedelta-like, optional
-
- Returns
- -------
- shifted : DatetimeIndex
- """
- if freq is not None and freq != self.offset:
- if isinstance(freq, compat.string_types):
- freq = to_offset(freq)
- result = Index.shift(self, n, freq)
- result.tz = self.tz
-
- return result
-
- if n == 0:
- # immutable so OK
- return self
-
- if self.offset is None:
- raise ValueError("Cannot shift with no offset")
-
- start = self[0] + n * self.offset
- end = self[-1] + n * self.offset
- return DatetimeIndex(start=start, end=end, freq=self.offset,
- name=self.name, tz=self.tz)
-
- def repeat(self, repeats, axis=None):
- """
- Analogous to ndarray.repeat
- """
- return DatetimeIndex(self.values.repeat(repeats),
- name=self.name)
-
- def take(self, indices, axis=0):
- """
- Analogous to ndarray.take
- """
- maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
- if isinstance(maybe_slice, slice):
- return self[maybe_slice]
- return super(DatetimeIndex, self).take(indices, axis)
-
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combine
@@ -1339,52 +1237,6 @@ def slice_indexer(self, start=None, end=None, step=None):
return Index.slice_indexer(self, start, end, step)
- def slice_locs(self, start=None, end=None):
- """
- Index.slice_locs, customized to handle partial ISO-8601 string slicing
- """
- if isinstance(start, compat.string_types) or isinstance(end, compat.string_types):
-
- if self.is_monotonic:
- try:
- if start:
- start_loc = self._get_string_slice(start).start
- else:
- start_loc = 0
-
- if end:
- end_loc = self._get_string_slice(end).stop
- else:
- end_loc = len(self)
-
- return start_loc, end_loc
- except KeyError:
- pass
-
- else:
- # can't use a slice indexer because we are not sorted!
- # so create an indexer directly
- try:
- if start:
- start_loc = self._get_string_slice(start,
- use_rhs=False)
- else:
- start_loc = np.arange(len(self))
-
- if end:
- end_loc = self._get_string_slice(end, use_lhs=False)
- else:
- end_loc = np.arange(len(self))
-
- return start_loc, end_loc
- except KeyError:
- pass
-
- if isinstance(start, time) or isinstance(end, time):
- raise KeyError('Cannot use slice_locs with time slice keys')
-
- return Index.slice_locs(self, start, end)
-
def __getitem__(self, key):
getitem = self._data.__getitem__
if np.isscalar(key):
@@ -1411,17 +1263,6 @@ def __getitem__(self, key):
return self._simple_new(result, self.name, new_offset, self.tz)
- # Try to run function on index first, and then on elements of index
- # Especially important for group-by functionality
- def map(self, f):
- try:
- result = f(self)
- if not isinstance(result, (np.ndarray, Index)):
- raise TypeError
- return result
- except Exception:
- return _algos.arrmap_object(self.asobject.values, f)
-
# alias to offset
def _get_freq(self):
return self.offset
@@ -1430,13 +1271,6 @@ def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq, doc="get/set the frequncy of the Index")
- @cache_readonly
- def inferred_freq(self):
- try:
- return infer_freq(self)
- except ValueError:
- return None
-
@property
def freqstr(self):
""" return the frequency object as a string if its set, otherwise None """
@@ -1692,8 +1526,8 @@ def tz_localize(self, tz, ambiguous='raise'):
else:
tz = tslib.maybe_get_tz(tz)
# Convert to UTC
-
- new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
+
+ new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
ambiguous=ambiguous)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 942a2f445fd48..b4d8a6547950d 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -8,7 +8,7 @@
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
-from pandas.core.base import DatetimeIndexOpsMixin
+from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
@@ -16,7 +16,7 @@
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries)
from pandas import compat
-from pandas.lib import Timestamp
+from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
@@ -61,7 +61,6 @@ class Period(PandasObject):
minute : int, default 0
second : int, default 0
"""
- _typ = 'periodindex'
__slots__ = ['freq', 'ordinal']
_comparables = ['name','freqstr']
@@ -171,7 +170,7 @@ def __hash__(self):
return hash((self.ordinal, self.freq))
def _add_delta(self, other):
- if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
+ if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
@@ -198,7 +197,7 @@ def _add_delta(self, other):
def __add__(self, other):
if isinstance(other, (timedelta, np.timedelta64,
- offsets.Tick, offsets.DateOffset)):
+ offsets.Tick, offsets.DateOffset, Timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
if self.ordinal == tslib.iNaT:
@@ -211,7 +210,7 @@ def __add__(self, other):
def __sub__(self, other):
if isinstance(other, (timedelta, np.timedelta64,
- offsets.Tick, offsets.DateOffset)):
+ offsets.Tick, offsets.DateOffset, Timedelta)):
neg_other = -other
return self + neg_other
elif com.is_integer(other):
@@ -606,10 +605,12 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
+ _typ = 'periodindex'
_attributes = ['name','freq']
_datetimelike_ops = ['year','month','day','hour','minute','second',
- 'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear']
+ 'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear', 'freq']
_is_numeric_dtype = False
+ freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
@@ -839,17 +840,6 @@ def to_datetime(self, dayfirst=False):
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
- # Try to run function on index first, and then on elements of index
- # Especially important for group-by functionality
- def map(self, f):
- try:
- result = f(self)
- if not isinstance(result, (np.ndarray, Index)):
- raise TypeError
- return result
- except Exception:
- return _algos.arrmap_object(self.asobject.values, f)
-
def _get_object_array(self):
freq = self.freq
return np.array([ Period._from_ordinal(ordinal=x, freq=freq) for x in self.values], copy=False)
@@ -902,7 +892,7 @@ def to_timestamp(self, freq=None, how='start'):
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _add_delta(self, other):
- if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
+ if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
@@ -1129,9 +1119,6 @@ def __getitem__(self, key):
return PeriodIndex(result, name=self.name, freq=self.freq)
- def _format_with_header(self, header, **kwargs):
- return header + self._format_native_types(**kwargs)
-
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
values = np.array(list(self), dtype=object)
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
new file mode 100644
index 0000000000000..01a89a1965549
--- /dev/null
+++ b/pandas/tseries/tdi.py
@@ -0,0 +1,979 @@
+""" implement the TimedeltaIndex """
+
+import operator
+import datetime
+from datetime import timedelta
+import numpy as np
+
+from pandas.core.common import (ABCSeries, _TD_DTYPE, _INT64_DTYPE,
+ is_timedelta64_dtype, _maybe_box,
+ _values_from_object, isnull)
+from pandas.core.index import Index, Int64Index
+import pandas.compat as compat
+from pandas.compat import u
+from pandas.core.base import PandasObject
+from pandas.util.decorators import cache_readonly
+from pandas.tseries.frequencies import to_offset
+import pandas.core.common as com
+from pandas.tseries import timedeltas
+from pandas.tseries.base import DatetimeIndexOpsMixin
+from pandas.tseries.timedeltas import to_timedelta, _coerce_scalar_to_timedelta_type
+import pandas.tseries.offsets as offsets
+from pandas.tseries.offsets import Tick, DateOffset
+
+import pandas.lib as lib
+import pandas.tslib as tslib
+import pandas.algos as _algos
+import pandas.index as _index
+
+Timedelta = tslib.Timedelta
+
+_resolution_map = {
+ 'ns' : offsets.Nano,
+ 'us' : offsets.Micro,
+ 'ms' : offsets.Milli,
+ 's' : offsets.Second,
+ 'm' : offsets.Minute,
+ 'h' : offsets.Hour,
+ 'D' : offsets.Day,
+ }
+
+def _td_index_cmp(opname, nat_result=False):
+ """
+ Wrap comparison operations to convert timedelta-like to timedelta64
+ """
+ def wrapper(self, other):
+ func = getattr(super(TimedeltaIndex, self), opname)
+ if _is_convertible_to_td(other):
+ other = _to_m8(other)
+ result = func(other)
+ if com.isnull(other):
+ result.fill(nat_result)
+ else:
+ if not com.is_list_like(other):
+ raise TypeError("cannot compare a TimedeltaIndex with type {0}".format(type(other)))
+
+ other = TimedeltaIndex(other).values
+ result = func(other)
+ result = _values_from_object(result)
+
+ if isinstance(other, Index):
+ o_mask = other.values.view('i8') == tslib.iNaT
+ else:
+ o_mask = other.view('i8') == tslib.iNaT
+
+ if o_mask.any():
+ result[o_mask] = nat_result
+
+ mask = self.asi8 == tslib.iNaT
+ if mask.any():
+ result[mask] = nat_result
+
+ # support of bool dtype indexers
+ if com.is_bool_dtype(result):
+ return result
+ return Index(result)
+
+ return wrapper
+
+class TimedeltaIndex(DatetimeIndexOpsMixin, Int64Index):
+ """
+ Immutable ndarray of timedelta64 data, represented internally as int64, and
+ which can be boxed to timedelta objects
+
+ Parameters
+ ----------
+ data : array-like (1-dimensional), optional
+ Optional timedelta-like data to construct index with
+ unit: unit of the arg (D,h,m,s,ms,us,ns), optional
+ denotes the unit if the data is an integer/float number
+ freq: a frequency for the index, optional
+ copy : bool
+ Make a copy of input ndarray
+ start : starting value, timedelta-like, optional
+ If data is None, start is used as the start point in generating regular
+ timedelta data.
+ periods : int, optional, > 0
+ Number of periods to generate, if generating index. Takes precedence
+ over end argument
+ end : end time, timedelta-like, optional
+ If periods is none, generated index will extend to first conforming
+ time on or just past end argument
+ closed : string or None, default None
+ Make the interval closed with respect to the given frequency to
+ the 'left', 'right', or both sides (None)
+ name : object
+ Name to be stored in the index
+ """
+
+ _typ = 'timedeltaindex'
+ _join_precedence = 10
+ def _join_i8_wrapper(joinf, **kwargs):
+ return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='m8[ns]', **kwargs)
+
+ _inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
+ _outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
+ _left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
+ _left_indexer_unique = _join_i8_wrapper(
+ _algos.left_join_indexer_unique_int64, with_indexers=False)
+ _arrmap = None
+ _datetimelike_ops = ['days','hours','minutes','seconds','milliseconds','microseconds',
+ 'nanoseconds','freq','components']
+
+ __eq__ = _td_index_cmp('__eq__')
+ __ne__ = _td_index_cmp('__ne__', nat_result=True)
+ __lt__ = _td_index_cmp('__lt__')
+ __gt__ = _td_index_cmp('__gt__')
+ __le__ = _td_index_cmp('__le__')
+ __ge__ = _td_index_cmp('__ge__')
+
+ _engine_type = _index.TimedeltaEngine
+
+ _comparables = ['name','freq']
+ _attributes = ['name','freq']
+ _is_numeric_dtype = True
+ freq = None
+
+ def __new__(cls, data=None, unit=None,
+ freq=None, start=None, end=None, periods=None,
+ copy=False, name=None,
+ closed=None, verify_integrity=True, **kwargs):
+
+ if isinstance(data, TimedeltaIndex) and freq is None:
+ if copy:
+ data = data.copy()
+ return data
+
+ freq_infer = False
+ if not isinstance(freq, DateOffset):
+
+ # if a passed freq is None, don't infer automatically
+ if freq != 'infer':
+ freq = to_offset(freq)
+ else:
+ freq_infer = True
+ freq = None
+
+ if periods is not None:
+ if com.is_float(periods):
+ periods = int(periods)
+ elif not com.is_integer(periods):
+ raise ValueError('Periods must be a number, got %s' %
+ str(periods))
+
+ if data is None and freq is None:
+ raise ValueError("Must provide freq argument if no data is "
+ "supplied")
+
+ if data is None:
+ return cls._generate(start, end, periods, name, freq,
+ closed=closed)
+
+ if unit is not None:
+ data = to_timedelta(data, unit=unit, box=False)
+
+ if not isinstance(data, (np.ndarray, Index, ABCSeries)):
+ if np.isscalar(data):
+ raise ValueError('TimedeltaIndex() must be called with a '
+ 'collection of some kind, %s was passed'
+ % repr(data))
+
+ # convert if not already
+ if getattr(data,'dtype',None) != _TD_DTYPE:
+ data = to_timedelta(data,unit=unit,box=False)
+ elif copy:
+ data = np.array(data,copy=True)
+
+ # check that we are matching freqs
+ if verify_integrity and len(data) > 0:
+ if freq is not None and not freq_infer:
+ index = cls._simple_new(data, name=name)
+ inferred = index.inferred_freq
+ if inferred != freq.freqstr:
+ on_freq = cls._generate(index[0], None, len(index), name, freq)
+ if not np.array_equal(index.asi8, on_freq.asi8):
+ raise ValueError('Inferred frequency {0} from passed timedeltas does not '
+ 'conform to passed frequency {1}'.format(inferred, freq.freqstr))
+ index.freq = freq
+ return index
+
+ if freq_infer:
+ index = cls._simple_new(data, name=name)
+ inferred = index.inferred_freq
+ if inferred:
+ index.freq = to_offset(inferred)
+ return index
+
+ return cls._simple_new(data, name=name, freq=freq)
+
+ @classmethod
+ def _generate(cls, start, end, periods, name, offset, closed=None):
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Must specify two of start, end, or periods')
+
+ if start is not None:
+ start = Timedelta(start)
+
+ if end is not None:
+ end = Timedelta(end)
+
+ left_closed = False
+ right_closed = False
+
+ if start is None and end is None:
+ if closed is not None:
+ raise ValueError("Closed has to be None if not both of start "
+ "and end are defined")
+
+ if closed is None:
+ left_closed = True
+ right_closed = True
+ elif closed == "left":
+ left_closed = True
+ elif closed == "right":
+ right_closed = True
+ else:
+ raise ValueError("Closed has to be either 'left', 'right' or None")
+
+ index = _generate_regular_range(start, end, periods, offset)
+ index = cls._simple_new(index, name=name, freq=offset)
+
+ if not left_closed:
+ index = index[1:]
+ if not right_closed:
+ index = index[:-1]
+
+ return index
+
+ @property
+ def _box_func(self):
+ return lambda x: Timedelta(x,unit='ns')
+
+ @classmethod
+ def _simple_new(cls, values, name=None, freq=None, **kwargs):
+ if not getattr(values,'dtype',None):
+ values = np.array(values,copy=False)
+ if values.dtype == np.object_:
+ values = tslib.array_to_timedelta64(values)
+ if values.dtype != _TD_DTYPE:
+ values = com._ensure_int64(values).view(_TD_DTYPE)
+
+ result = object.__new__(cls)
+ result._data = values
+ result.name = name
+ result.freq = freq
+ result._reset_identity()
+ return result
+
+ _na_value = tslib.NaT
+ """The expected NA value to use with this index."""
+
+ @property
+ def _formatter_func(self):
+ from pandas.core.format import _get_format_timedelta64
+ return _get_format_timedelta64(self, box=True)
+
+ def _format_footer(self):
+ tagline = 'Length: %d, Freq: %s'
+ return tagline % (len(self), self.freqstr)
+
+ def __setstate__(self, state):
+ """Necessary for making this object picklable"""
+ if isinstance(state, dict):
+ super(TimedeltaIndex, self).__setstate__(state)
+ else:
+ raise Exception("invalid pickle state")
+ _unpickle_compat = __setstate__
+
+ def _add_delta(self, delta):
+ if isinstance(delta, (Tick, timedelta, np.timedelta64)):
+ new_values = self._add_delta_td(delta)
+ elif isinstance(delta, TimedeltaIndex):
+ new_values = self._add_delta_tdi(delta)
+ else:
+ raise ValueError("cannot add the type {0} to a TimedeltaIndex".format(type(delta)))
+
+ result = TimedeltaIndex(new_values, freq='infer')
+ return result
+
+ def _evaluate_with_timedelta_like(self, other, op, opstr):
+
+ # allow division by a timedelta
+ if opstr in ['__div__','__truediv__']:
+ if _is_convertible_to_td(other):
+ other = Timedelta(other)
+ if isnull(other):
+ raise NotImplementedError("division by pd.NaT not implemented")
+
+ i8 = self.asi8
+ result = i8/float(other.value)
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ result = result.astype('float64')
+ result[mask] = np.nan
+ return Index(result,name=self.name,copy=False)
+
+ raise TypeError("can only perform ops with timedelta like values")
+
+ def _add_datelike(self, other):
+
+ # adding a timedeltaindex to a datetimelike
+ from pandas import Timestamp, DatetimeIndex
+ other = Timestamp(other)
+ i8 = self.asi8
+ result = i8 + other.value
+ if self.hasnans:
+ mask = i8 == tslib.iNaT
+ result[mask] = tslib.iNaT
+ return DatetimeIndex(result,name=self.name,copy=False)
+
+ def _format_native_types(self, na_rep=u('NaT'),
+ date_format=None, **kwargs):
+ from pandas.core.format import Timedelta64Formatter
+ return Timedelta64Formatter(values=self,
+ nat_rep=na_rep,
+ justify='all').get_result()
+
+ def _get_field(self, m):
+
+ values = self.asi8
+ hasnans = self.hasnans
+ if hasnans:
+ result = np.empty(len(self), dtype='float64')
+ mask = values == tslib.iNaT
+ imask = ~mask
+ result.flat[imask] = np.array([ getattr(Timedelta(val),m) for val in values[imask] ])
+ result[mask] = np.nan
+ else:
+ result = np.array([ getattr(Timedelta(val),m) for val in values ],dtype='int64')
+ return result
+
+ @property
+ def days(self):
+ """ The number of integer days for each element """
+ return self._get_field('days')
+
+ @property
+ def hours(self):
+ """ The number of integer hours for each element """
+ return self._get_field('hours')
+
+ @property
+ def minutes(self):
+ """ The number of integer minutes for each element """
+ return self._get_field('minutes')
+
+ @property
+ def seconds(self):
+ """ The number of integer seconds for each element """
+ return self._get_field('seconds')
+
+ @property
+ def milliseconds(self):
+ """ The number of integer milliseconds for each element """
+ return self._get_field('milliseconds')
+
+ @property
+ def microseconds(self):
+ """ The number of integer microseconds for each element """
+ return self._get_field('microseconds')
+
+ @property
+ def nanoseconds(self):
+ """ The number of integer nanoseconds for each element """
+ return self._get_field('nanoseconds')
+
+ @property
+ def components(self):
+ """
+ Return a dataframe of the components of the Timedeltas
+
+ Returns
+ -------
+ a DataFrame
+ """
+ from pandas import DataFrame
+
+ columns = ['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds']
+ hasnans = self.hasnans
+ if hasnans:
+ def f(x):
+ if isnull(x):
+ return [np.nan]*len(columns)
+ return x.components
+ else:
+ def f(x):
+ return x.components
+
+ result = DataFrame([ f(x) for x in self ])
+ result.columns = columns
+ if not hasnans:
+ result = result.astype('int64')
+ return result
+
+ def summary(self, name=None):
+ formatter = self._formatter_func
+ if len(self) > 0:
+ index_summary = ', %s to %s' % (formatter(self[0]),
+ formatter(self[-1]))
+ else:
+ index_summary = ''
+
+ if name is None:
+ name = type(self).__name__
+ result = '%s: %s entries%s' % (com.pprint_thing(name),
+ len(self), index_summary)
+ if self.freq:
+ result += '\nFreq: %s' % self.freqstr
+
+ return result
+
+ def to_pytimedelta(self):
+ """
+ Return TimedeltaIndex as object ndarray of datetime.timedelta objects
+
+ Returns
+ -------
+ timedeltas : ndarray
+ """
+ return tslib.ints_to_pytimedelta(self.asi8)
+
+ def astype(self, dtype):
+ dtype = np.dtype(dtype)
+
+ if dtype == np.object_:
+ return self.asobject
+ elif dtype == _INT64_DTYPE:
+ return self.asi8.copy()
+ elif dtype == _TD_DTYPE:
+ return self
+ elif dtype.kind == 'm':
+
+ # return an index (essentially this is division)
+ result = self.values.astype(dtype)
+ if self.hasnans:
+ result = result.astype('float64')
+ result[self.asi8 == tslib.iNaT] = np.nan
+ return Index(result,name=self.name)
+
+ return Index(result.astype('i8'),name=self.name)
+
+ else: # pragma: no cover
+ raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
+
+ def union(self, other):
+ """
+ Specialized union for TimedeltaIndex objects. If combining
+ overlapping ranges with the same DateOffset, this will be much
+ faster than Index.union
+
+ Parameters
+ ----------
+ other : TimedeltaIndex or array-like
+
+ Returns
+ -------
+ y : Index or TimedeltaIndex
+ """
+ if _is_convertible_to_index(other):
+ try:
+ other = TimedeltaIndex(other)
+ except TypeError:
+ pass
+
+ this, other = self, other
+
+ if this._can_fast_union(other):
+ return this._fast_union(other)
+ else:
+ result = Index.union(this, other)
+ if isinstance(result, TimedeltaIndex):
+ if result.freq is None:
+ result.freq = to_offset(result.inferred_freq)
+ return result
+
+ def append(self, other):
+ """
+ Append a collection of Index options together
+
+ Parameters
+ ----------
+ other : Index or list/tuple of indices
+
+ Returns
+ -------
+ appended : Index
+ """
+ name = self.name
+ to_concat = [self]
+
+ if isinstance(other, (list, tuple)):
+ to_concat = to_concat + list(other)
+ else:
+ to_concat.append(other)
+
+ for obj in to_concat:
+ if isinstance(obj, Index) and obj.name != name:
+ name = None
+ break
+
+ to_concat = self._ensure_compat_concat(to_concat)
+ return Index(com._concat_compat(to_concat), name=name)
+
+ def join(self, other, how='left', level=None, return_indexers=False):
+ """
+ See Index.join
+ """
+ if _is_convertible_to_index(other):
+ try:
+ other = TimedeltaIndex(other)
+ except (TypeError, ValueError):
+ pass
+
+ return Index.join(self, other, how=how, level=level,
+ return_indexers=return_indexers)
+
+ def _wrap_joined_index(self, joined, other):
+ name = self.name if self.name == other.name else None
+ if (isinstance(other, TimedeltaIndex) and self.freq == other.freq
+ and self._can_fast_union(other)):
+ joined = self._shallow_copy(joined)
+ joined.name = name
+ return joined
+ else:
+ return self._simple_new(joined, name)
+
+ def _can_fast_union(self, other):
+ if not isinstance(other, TimedeltaIndex):
+ return False
+
+ freq = self.freq
+
+ if freq is None or freq != other.freq:
+ return False
+
+ if not self.is_monotonic or not other.is_monotonic:
+ return False
+
+ if len(self) == 0 or len(other) == 0:
+ return True
+
+ # to make our life easier, "sort" the two ranges
+ if self[0] <= other[0]:
+ left, right = self, other
+ else:
+ left, right = other, self
+
+ right_start = right[0]
+ left_end = left[-1]
+
+ # Only need to "adjoin", not overlap
+ return (right_start == left_end + freq) or right_start in left
+
+ def _fast_union(self, other):
+ if len(other) == 0:
+ return self.view(type(self))
+
+ if len(self) == 0:
+ return other.view(type(self))
+
+ # to make our life easier, "sort" the two ranges
+ if self[0] <= other[0]:
+ left, right = self, other
+ else:
+ left, right = other, self
+
+ left_start, left_end = left[0], left[-1]
+ right_end = right[-1]
+
+ # concatenate
+ if left_end < right_end:
+ loc = right.searchsorted(left_end, side='right')
+ right_chunk = right.values[loc:]
+ dates = com._concat_compat((left.values, right_chunk))
+ return self._shallow_copy(dates)
+ else:
+ return left
+
+ def __array_finalize__(self, obj):
+ if self.ndim == 0: # pragma: no cover
+ return self.item()
+
+ self.name = getattr(obj, 'name', None)
+ self.freq = getattr(obj, 'freq', None)
+ self._reset_identity()
+
+ def _wrap_union_result(self, other, result):
+ name = self.name if self.name == other.name else None
+ return self._simple_new(result, name=name, freq=None)
+
+ def intersection(self, other):
+ """
+ Specialized intersection for TimedeltaIndex objects. May be much faster
+ than Index.intersection
+
+ Parameters
+ ----------
+ other : TimedeltaIndex or array-like
+
+ Returns
+ -------
+ y : Index or TimedeltaIndex
+ """
+ if not isinstance(other, TimedeltaIndex):
+ try:
+ other = TimedeltaIndex(other)
+ except (TypeError, ValueError):
+ pass
+ result = Index.intersection(self, other)
+ return result
+
+ if len(self) == 0:
+ return self
+ if len(other) == 0:
+ return other
+ # to make our life easier, "sort" the two ranges
+ if self[0] <= other[0]:
+ left, right = self, other
+ else:
+ left, right = other, self
+
+ end = min(left[-1], right[-1])
+ start = right[0]
+
+ if end < start:
+ return type(self)(data=[])
+ else:
+ lslice = slice(*left.slice_locs(start, end))
+ left_chunk = left.values[lslice]
+ return self._shallow_copy(left_chunk)
+
+ def _possibly_promote(self, other):
+ if other.inferred_type == 'timedelta':
+ other = TimedeltaIndex(other)
+ return self, other
+
+ def get_value(self, series, key):
+ """
+ Fast lookup of value from 1-dimensional ndarray. Only use this if you
+ know what you're doing
+ """
+
+ if _is_convertible_to_td(key):
+ key = Timedelta(key)
+ return self.get_value_maybe_box(series, key)
+
+ try:
+ return _maybe_box(self, Index.get_value(self, series, key), series, key)
+ except KeyError:
+ try:
+ loc = self._get_string_slice(key)
+ return series[loc]
+ except (TypeError, ValueError, KeyError):
+ pass
+
+ try:
+ return self.get_value_maybe_box(series, key)
+ except (TypeError, ValueError, KeyError):
+ raise KeyError(key)
+
+ def get_value_maybe_box(self, series, key):
+ if not isinstance(key, Timedelta):
+ key = Timedelta(key)
+ values = self._engine.get_value(_values_from_object(series), key)
+ return _maybe_box(self, values, series, key)
+
+ def get_loc(self, key):
+ """
+ Get integer location for requested label
+
+ Returns
+ -------
+ loc : int
+ """
+ if _is_convertible_to_td(key):
+ key = Timedelta(key)
+ return self._engine.get_loc(key)
+
+ try:
+ return Index.get_loc(self, key)
+ except (KeyError, ValueError):
+ try:
+ return self._get_string_slice(key)
+ except (TypeError, KeyError, ValueError):
+ pass
+
+ try:
+ stamp = Timedelta(key)
+ return self._engine.get_loc(stamp)
+ except (KeyError, ValueError):
+ raise KeyError(key)
+
+ def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
+ freq = getattr(self, 'freqstr',
+ getattr(self, 'inferred_freq', None))
+
+ loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
+ use_rhs=use_rhs)
+ return loc
+
+ def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
+
+ # given a key, try to figure out a location for a partial slice
+ if not isinstance(key, compat.string_types):
+ return key
+
+ parsed = _coerce_scalar_to_timedelta_type(key, box=True)
+
+ is_monotonic = self.is_monotonic
+
+ # figure out the resolution of the passed td
+ # and round to it
+ reso = parsed.resolution
+ t1 = parsed.round(reso)
+ t2 = t1 + _resolution_map[reso]() - Timedelta(1,'ns')
+
+ stamps = self.asi8
+
+ if is_monotonic:
+
+ # we are out of range
+ if len(stamps) and (
+ (use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
+ (use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
+ raise KeyError
+
+ # a monotonic (sorted) series can be sliced
+ left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
+ right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
+
+ return slice(left, right)
+
+ lhs_mask = (stamps >= t1.value) if use_lhs else True
+ rhs_mask = (stamps <= t2.value) if use_rhs else True
+
+ # try to find the matching timedeltas
+ return (lhs_mask & rhs_mask).nonzero()[0]
+
+ def __getitem__(self, key):
+ getitem = self._data.__getitem__
+ if np.isscalar(key):
+ val = getitem(key)
+ return Timedelta(val)
+ else:
+ if com._is_bool_indexer(key):
+ key = np.asarray(key)
+ if key.all():
+ key = slice(0,None,None)
+ else:
+ key = lib.maybe_booleans_to_slice(key.view(np.uint8))
+
+ result = getitem(key)
+ if result.ndim > 1:
+ return result
+
+ return self._simple_new(result, self.name)
+
+ @property
+ def freqstr(self):
+ """ return the frequency object as a string if its set, otherwise None """
+ if self.freq is None:
+ return None
+ return self.freq
+
+ def searchsorted(self, key, side='left'):
+ if isinstance(key, (np.ndarray, Index)):
+ key = np.array(key, dtype=_TD_DTYPE, copy=False)
+ else:
+ key = _to_m8(key)
+
+ return self.values.searchsorted(key, side=side)
+
+ def is_type_compatible(self, typ):
+ return typ == self.inferred_type or typ == 'timedelta'
+
+ @property
+ def inferred_type(self):
+ return 'timedelta64'
+
+ @property
+ def dtype(self):
+ return _TD_DTYPE
+
+ @property
+ def is_all_dates(self):
+ return True
+
+ def equals(self, other):
+ """
+ Determines if two Index objects contain the same elements.
+ """
+ if self.is_(other):
+ return True
+
+ if (not hasattr(other, 'inferred_type') or
+ other.inferred_type != 'timedelta64'):
+ try:
+ other = TimedeltaIndex(other)
+ except:
+ return False
+
+ return np.array_equal(self.asi8, other.asi8)
+
+ def insert(self, loc, item):
+ """
+ Make new Index inserting new item at location
+
+ Parameters
+ ----------
+ loc : int
+ item : object
+ if not either a Python timedelta or a numpy integer-like, returned
+ Index dtype will be object rather than timedelta.
+
+ Returns
+ -------
+ new_index : Index
+ """
+
+ # try to convert if possible
+ if _is_convertible_to_td(item):
+ try:
+ item = Timedelta(item)
+ except:
+ pass
+
+ freq = None
+ if isinstance(item, Timedelta):
+
+ # check freq can be preserved on edge cases
+ if self.freq is not None:
+ if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
+ freq = self.freq
+ elif (loc == len(self)) and item - self.freq == self[-1]:
+ freq = self.freq
+ item = _to_m8(item)
+
+ try:
+ new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
+ self[loc:].asi8))
+ return TimedeltaIndex(new_tds, name=self.name, freq=freq)
+
+ except (AttributeError, TypeError):
+
+ # fall back to object index
+ if isinstance(item,compat.string_types):
+ return self.asobject.insert(loc, item)
+ raise TypeError("cannot insert TimedeltaIndex with incompatible label")
+
+ def delete(self, loc):
+ """
+ Make a new TimedeltaIndex with passed location(s) deleted.
+
+ Parameters
+ ----------
+ loc: int, slice or array of ints
+ Indicate which sub-arrays to remove.
+
+ Returns
+ -------
+ new_index : TimedeltaIndex
+ """
+ new_tds = np.delete(self.asi8, loc)
+
+ freq = 'infer'
+ if lib.is_integer(loc):
+ if loc in (0, -len(self), -1, len(self) - 1):
+ freq = self.freq
+ else:
+ if com.is_list_like(loc):
+ loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)))
+ if isinstance(loc, slice) and loc.step in (1, None):
+ if (loc.start in (0, None) or loc.stop in (len(self), None)):
+ freq = self.freq
+
+ return TimedeltaIndex(new_tds, name=self.name, freq=freq)
+
+TimedeltaIndex._add_numeric_methods()
+
+def _is_convertible_to_index(other):
+ """ return a boolean whether I can attempt conversion to a TimedeltaIndex """
+ if isinstance(other, TimedeltaIndex):
+ return True
+ elif (len(other) > 0 and
+ other.inferred_type not in ('floating', 'mixed-integer','integer',
+ 'mixed-integer-float', 'mixed')):
+ return True
+ return False
+
+
+def _is_convertible_to_td(key):
+ return isinstance(key, (DateOffset, timedelta, Timedelta, np.timedelta64, compat.string_types))
+
+def _to_m8(key):
+ '''
+ Timedelta-like => m8[ns] (timedelta64)
+ '''
+ if not isinstance(key, Timedelta):
+ # this also converts strings
+ key = Timedelta(key)
+
+ # return a type that can be compared
+ return np.int64(key.value).view(_TD_DTYPE)
+
+def _generate_regular_range(start, end, periods, offset):
+ stride = offset.nanos
+ if periods is None:
+ b = Timedelta(start).value
+ e = Timedelta(end).value
+ e += stride - e % stride
+ elif start is not None:
+ b = Timedelta(start).value
+ e = b + periods * stride
+ elif end is not None:
+ e = Timedelta(end).value + stride
+ b = e - periods * stride
+ else:
+ raise NotImplementedError
+
+ data = np.arange(b, e, stride, dtype=np.int64)
+ data = TimedeltaIndex._simple_new(data, None)
+
+ return data
+
+
+def timedelta_range(start=None, end=None, periods=None, freq='D',
+ name=None, closed=None):
+ """
+ Return a fixed frequency timedelta index, with day as the default
+ frequency
+
+ Parameters
+ ----------
+ start : string or timedelta-like, default None
+ Left bound for generating timedeltas
+ end : string or timedelta-like, default None
+ Right bound for generating timedeltas
+ periods : integer or None, default None
+ If None, must specify start and end
+ freq : string or DateOffset, default 'D' (calendar daily)
+ Frequency strings can have multiples, e.g. '5H'
+ name : str, default None
+ Name of the resulting index
+ closed : string or None, default None
+ Make the interval closed with respect to the given frequency to
+ the 'left', 'right', or both sides (None)
+
+ Notes
+ -----
+ 2 of start, end, or periods must be specified
+
+ Returns
+ -------
+ rng : TimedeltaIndex
+ """
+ return TimedeltaIndex(start=start, end=end, periods=periods,
+ freq=freq, name=name,
+ closed=closed)
+
+
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
new file mode 100644
index 0000000000000..58b126a7efab8
--- /dev/null
+++ b/pandas/tseries/tests/test_base.py
@@ -0,0 +1,973 @@
+from __future__ import print_function
+import re
+from datetime import datetime, timedelta
+import numpy as np
+import pandas as pd
+from pandas.tseries.base import DatetimeIndexOpsMixin
+from pandas.util.testing import assertRaisesRegexp, assert_isinstance
+from pandas.tseries.common import is_datetimelike
+from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
+ TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
+import pandas.tslib as tslib
+import nose
+
+import pandas.util.testing as tm
+
+from pandas.tests.test_base import Ops
+
+class TestDatetimeIndexOps(Ops):
+ tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
+ 'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
+
+ def setUp(self):
+ super(TestDatetimeIndexOps, self).setUp()
+ mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
+ self.is_valid_objs = [ o for o in self.objs if mask(o) ]
+ self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
+
+ def test_ops_properties(self):
+ self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
+ self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
+ 'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
+
+ def test_ops_properties_basic(self):
+
+ # sanity check that the behavior didn't change
+ # GH7206
+ for op in ['year','day','second','weekday']:
+ self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
+
+ # attribute access should still work!
+ s = Series(dict(year=2000,month=1,day=10))
+ self.assertEquals(s.year,2000)
+ self.assertEquals(s.month,1)
+ self.assertEquals(s.day,10)
+ self.assertRaises(AttributeError, lambda : s.weekday)
+
+ def test_asobject_tolist(self):
+ idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
+ expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
+ pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+
+ self.assertEqual(result.dtype, object)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(idx.tolist(), expected_list)
+
+ idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
+ expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
+ pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
+ pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
+ pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+ self.assertEqual(result.dtype, object)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(idx.tolist(), expected_list)
+
+ idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
+ pd.NaT, datetime(2013, 1, 4)], name='idx')
+ expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
+ pd.NaT, pd.Timestamp('2013-01-04')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+ self.assertEqual(result.dtype, object)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(idx.tolist(), expected_list)
+
+ def test_minmax(self):
+ for tz in self.tz:
+ # monotonic
+ idx1 = pd.DatetimeIndex([pd.NaT, '2011-01-01', '2011-01-02',
+ '2011-01-03'], tz=tz)
+ self.assertTrue(idx1.is_monotonic)
+
+ # non-monotonic
+ idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
+ '2011-01-02', pd.NaT], tz=tz)
+ self.assertFalse(idx2.is_monotonic)
+
+ for idx in [idx1, idx2]:
+ self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
+ self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
+
+ for op in ['min', 'max']:
+ # Return NaT
+ obj = DatetimeIndex([])
+ self.assertTrue(pd.isnull(getattr(obj, op)()))
+
+ obj = DatetimeIndex([pd.NaT])
+ self.assertTrue(pd.isnull(getattr(obj, op)()))
+
+ obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
+ self.assertTrue(pd.isnull(getattr(obj, op)()))
+
+ def test_representation(self):
+ idx1 = DatetimeIndex([], freq='D')
+ idx2 = DatetimeIndex(['2011-01-01'], freq='D')
+ idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
+ idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
+ freq='H', tz='Asia/Tokyo')
+ idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
+ tz='US/Eastern')
+
+ exp1 = """<class 'pandas.tseries.index.DatetimeIndex'>
+Length: 0, Freq: D, Timezone: None"""
+ exp2 = """<class 'pandas.tseries.index.DatetimeIndex'>
+[2011-01-01]
+Length: 1, Freq: D, Timezone: None"""
+ exp3 = """<class 'pandas.tseries.index.DatetimeIndex'>
+[2011-01-01, 2011-01-02]
+Length: 2, Freq: D, Timezone: None"""
+ exp4 = """<class 'pandas.tseries.index.DatetimeIndex'>
+[2011-01-01, ..., 2011-01-03]
+Length: 3, Freq: D, Timezone: None"""
+ exp5 = """<class 'pandas.tseries.index.DatetimeIndex'>
+[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00]
+Length: 3, Freq: H, Timezone: Asia/Tokyo"""
+ exp6 = """<class 'pandas.tseries.index.DatetimeIndex'>
+[2011-01-01 09:00:00-05:00, ..., NaT]
+Length: 3, Freq: None, Timezone: US/Eastern"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
+ [exp1, exp2, exp3, exp4, exp5, exp6]):
+ for func in ['__repr__', '__unicode__', '__str__']:
+ result = getattr(idx, func)()
+ self.assertEqual(result, expected)
+
+ def test_resolution(self):
+ for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
+ ['day', 'day', 'day', 'day',
+ 'hour', 'minute', 'second', 'millisecond', 'microsecond']):
+ for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
+ idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
+ self.assertEqual(idx.resolution, expected)
+
+ def test_add_iadd(self):
+ for tz in self.tz:
+ # union
+ rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
+ expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
+
+ rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
+ expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
+
+ rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other3 = pd.DatetimeIndex([], tz=tz)
+ expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+
+ for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
+ (rng3, other3, expected3)]:
+ result_add = rng + other
+ result_union = rng.union(other)
+
+ tm.assert_index_equal(result_add, expected)
+ tm.assert_index_equal(result_union, expected)
+ rng += other
+ tm.assert_index_equal(rng, expected)
+
+ # offset
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2),
+ np.timedelta64(2, 'h'), Timedelta(hours=2)]
+
+ for delta in offsets:
+ rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
+ result = rng + delta
+ expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
+ tm.assert_index_equal(result, expected)
+ rng += delta
+ tm.assert_index_equal(rng, expected)
+
+ # int
+ rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
+ result = rng + 1
+ expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
+ tm.assert_index_equal(result, expected)
+ rng += 1
+ tm.assert_index_equal(rng, expected)
+
+ def test_sub_isub(self):
+ for tz in self.tz:
+ # diff
+ rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
+ expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+
+ rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
+ expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
+
+ rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other3 = pd.DatetimeIndex([], tz=tz)
+ expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+
+ for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
+ (rng3, other3, expected3)]:
+ result_add = rng - other
+ result_union = rng.diff(other)
+
+ tm.assert_index_equal(result_add, expected)
+ tm.assert_index_equal(result_union, expected)
+ rng -= other
+ tm.assert_index_equal(rng, expected)
+
+ # offset
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ Timedelta(hours=2)]
+
+ for delta in offsets:
+ rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
+ result = rng - delta
+ expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
+ tm.assert_index_equal(result, expected)
+ rng -= delta
+ tm.assert_index_equal(rng, expected)
+
+ # int
+ rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
+ result = rng - 1
+ expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
+ tm.assert_index_equal(result, expected)
+ rng -= 1
+ tm.assert_index_equal(rng, expected)
+
+ def test_value_counts_unique(self):
+ # GH 7735
+ for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
+ # create repeated values, 'n'th element is repeated by n+1 times
+ idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
+
+ exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
+ expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
+ tm.assert_series_equal(idx.value_counts(), expected)
+
+ expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
+ tm.assert_index_equal(idx.unique(), expected)
+
+ idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
+ '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
+
+ exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
+ expected = Series([3, 2], index=exp_idx)
+ tm.assert_series_equal(idx.value_counts(), expected)
+
+ exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
+ expected = Series([3, 2, 1], index=exp_idx)
+ tm.assert_series_equal(idx.value_counts(dropna=False), expected)
+
+ tm.assert_index_equal(idx.unique(), exp_idx)
+
+
+class TestTimedeltaIndexOps(Ops):
+
+ def setUp(self):
+ super(TestTimedeltaIndexOps, self).setUp()
+ mask = lambda x: isinstance(x, TimedeltaIndex)
+ self.is_valid_objs = [ o for o in self.objs if mask(o) ]
+ self.not_valid_objs = [ ]
+
+ def test_ops_properties(self):
+ self.check_ops_properties(['days','hours','minutes','seconds','milliseconds'])
+ self.check_ops_properties(['microseconds','nanoseconds'])
+
+ def test_asobject_tolist(self):
+ idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
+ expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'),
+ Timedelta('4 days')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+
+ self.assertEqual(result.dtype, object)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(idx.tolist(), expected_list)
+
+ idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT,
+ timedelta(days=4)], name='idx')
+ expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT,
+ Timedelta('4 days')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+ self.assertEqual(result.dtype, object)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(idx.tolist(), expected_list)
+
+ def test_minmax(self):
+
+ # monotonic
+ idx1 = TimedeltaIndex(['nat', '1 days', '2 days', '3 days'])
+ self.assertTrue(idx1.is_monotonic)
+
+ # non-monotonic
+ idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
+ self.assertFalse(idx2.is_monotonic)
+
+ for idx in [idx1, idx2]:
+ self.assertEqual(idx.min(), Timedelta('1 days')),
+ self.assertEqual(idx.max(), Timedelta('3 days')),
+
+ for op in ['min', 'max']:
+ # Return NaT
+ obj = TimedeltaIndex([])
+ self.assertTrue(pd.isnull(getattr(obj, op)()))
+
+ obj = TimedeltaIndex([pd.NaT])
+ self.assertTrue(pd.isnull(getattr(obj, op)()))
+
+ obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
+ self.assertTrue(pd.isnull(getattr(obj, op)()))
+
+ def test_representation(self):
+ idx1 = TimedeltaIndex([], freq='D')
+ idx2 = TimedeltaIndex(['1 days'], freq='D')
+ idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
+ idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
+ idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
+
+
+ exp1 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
+Length: 0, Freq: <Day>"""
+ exp2 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
+['1 days']
+Length: 1, Freq: <Day>"""
+ exp3 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
+['1 days', '2 days']
+Length: 2, Freq: <Day>"""
+ exp4 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
+['1 days', ..., '3 days']
+Length: 3, Freq: <Day>"""
+ exp5 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
+['1 days 00:00:01', ..., '3 days 00:00:00']
+Length: 3, Freq: None"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
+ [exp1, exp2, exp3, exp4, exp5]):
+ for func in ['__repr__', '__unicode__', '__str__']:
+ result = getattr(idx, func)()
+ self.assertEqual(result, expected)
+
+ def test_add_iadd(self):
+
+ # only test adding/sub offsets as + is now numeric
+
+ # offset
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2),
+ np.timedelta64(2, 'h'), Timedelta(hours=2)]
+
+ for delta in offsets:
+ rng = timedelta_range('1 days','10 days')
+ result = rng + delta
+ expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D')
+ tm.assert_index_equal(result, expected)
+ rng += delta
+ tm.assert_index_equal(rng, expected)
+
+ # int
+ rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
+ result = rng + 1
+ expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
+ tm.assert_index_equal(result, expected)
+ rng += 1
+ tm.assert_index_equal(rng, expected)
+
+ def test_sub_isub(self):
+
+ # only test adding/sub offsets as - is now numeric
+
+ # offset
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ Timedelta(hours=2)]
+
+ for delta in offsets:
+ rng = timedelta_range('1 days','10 days')
+ result = rng - delta
+ expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
+ tm.assert_index_equal(result, expected)
+ rng -= delta
+ tm.assert_index_equal(rng, expected)
+
+ # int
+ rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
+ result = rng - 1
+ expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
+ tm.assert_index_equal(result, expected)
+ rng -= 1
+ tm.assert_index_equal(rng, expected)
+
+ def test_ops_compat(self):
+
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ Timedelta(hours=2)]
+
+ rng = timedelta_range('1 days','10 days',name='foo')
+
+ # multiply
+ for offset in offsets:
+ self.assertRaises(TypeError, lambda : rng * offset)
+
+ # divide
+ expected = Int64Index((np.arange(10)+1)*12,name='foo')
+ for offset in offsets:
+ result = rng / offset
+ tm.assert_index_equal(result,expected)
+
+ # divide with nats
+ rng = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
+ expected = Float64Index([12,np.nan,24])
+ for offset in offsets:
+ result = rng / offset
+ tm.assert_index_equal(result,expected)
+
+        # don't allow division by NaT (maybe we could in the future)
+ self.assertRaises(TypeError, lambda : rng / pd.NaT)
+
+ def test_subtraction_ops(self):
+
+ # with datetimes/timedelta and tdi/dti
+ tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
+ dti = date_range('20130101',periods=3)
+ td = Timedelta('1 days')
+ dt = Timestamp('20130101')
+
+ self.assertRaises(TypeError, lambda : tdi - dt)
+ self.assertRaises(TypeError, lambda : tdi - dti)
+
+ result = dt-dti
+ expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = dti-dt
+ expected = TimedeltaIndex(['0 days','1 days','2 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = tdi-td
+ expected = TimedeltaIndex(['0 days',pd.NaT,'1 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = td-tdi
+ expected = TimedeltaIndex(['0 days',pd.NaT,'-1 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = dti-td
+ expected = DatetimeIndex(['20121231','20130101','20130102'])
+ tm.assert_index_equal(result,expected)
+
+ result = dt-tdi
+ expected = DatetimeIndex(['20121231',pd.NaT,'20121230'])
+ tm.assert_index_equal(result,expected)
+
+ def test_dti_tdi_numeric_ops(self):
+
+ # These are normally union/diff set-like ops
+ tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
+ dti = date_range('20130101',periods=3)
+ td = Timedelta('1 days')
+ dt = Timestamp('20130101')
+
+ result = tdi-tdi
+ expected = TimedeltaIndex(['0 days',pd.NaT,'0 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = tdi+tdi
+ expected = TimedeltaIndex(['2 days',pd.NaT,'4 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = dti-tdi
+ expected = DatetimeIndex(['20121231',pd.NaT,'20130101'])
+ tm.assert_index_equal(result,expected)
+
+ def test_addition_ops(self):
+
+ # with datetimes/timedelta and tdi/dti
+ tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
+ dti = date_range('20130101',periods=3)
+ td = Timedelta('1 days')
+ dt = Timestamp('20130101')
+
+ result = tdi + dt
+ expected = DatetimeIndex(['20130102',pd.NaT,'20130103'])
+ tm.assert_index_equal(result,expected)
+
+ result = dt + tdi
+ expected = DatetimeIndex(['20130102',pd.NaT,'20130103'])
+ tm.assert_index_equal(result,expected)
+
+ result = td + tdi
+ expected = TimedeltaIndex(['2 days',pd.NaT,'3 days'])
+ tm.assert_index_equal(result,expected)
+
+ result = tdi + td
+ expected = TimedeltaIndex(['2 days',pd.NaT,'3 days'])
+ tm.assert_index_equal(result,expected)
+
+ # unequal length
+ self.assertRaises(ValueError, lambda : tdi + dti[0:1])
+ self.assertRaises(ValueError, lambda : tdi[0:1] + dti)
+
+ # random indexes
+ self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3]))
+
+ # this is a union!
+ #self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
+
+ result = tdi + dti
+ expected = DatetimeIndex(['20130102',pd.NaT,'20130105'])
+ tm.assert_index_equal(result,expected)
+
+ result = dti + tdi
+ expected = DatetimeIndex(['20130102',pd.NaT,'20130105'])
+ tm.assert_index_equal(result,expected)
+
+ result = dt + td
+ expected = Timestamp('20130102')
+ self.assertEqual(result,expected)
+
+ result = td + dt
+ expected = Timestamp('20130102')
+ self.assertEqual(result,expected)
+
+ def test_value_counts_unique(self):
+ # GH 7735
+
+ idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
+ # create repeated values, 'n'th element is repeated by n+1 times
+ idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
+
+ exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
+ expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
+ tm.assert_series_equal(idx.value_counts(), expected)
+
+ expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
+ tm.assert_index_equal(idx.unique(), expected)
+
+ idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
+ '1 days 08:00:00', '1 days 08:00:00', pd.NaT])
+
+ exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
+ expected = Series([3, 2], index=exp_idx)
+ tm.assert_series_equal(idx.value_counts(), expected)
+
+ exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT])
+ expected = Series([3, 2, 1], index=exp_idx)
+ tm.assert_series_equal(idx.value_counts(dropna=False), expected)
+
+ tm.assert_index_equal(idx.unique(), exp_idx)
+
+class TestPeriodIndexOps(Ops):
+
+ def setUp(self):
+ super(TestPeriodIndexOps, self).setUp()
+ mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
+ self.is_valid_objs = [ o for o in self.objs if mask(o) ]
+ self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
+
+ def test_ops_properties(self):
+ self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
+ self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
+
+ def test_asobject_tolist(self):
+ idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
+ expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
+ pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+ self.assertEqual(result.dtype, object)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(idx.tolist(), expected_list)
+
+ idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
+ expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
+ pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
+ expected = pd.Index(expected_list, dtype=object, name='idx')
+ result = idx.asobject
+ self.assertTrue(isinstance(result, Index))
+ self.assertEqual(result.dtype, object)
+ for i in [0, 1, 3]:
+ self.assertTrue(result[i], expected[i])
+ self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
+ self.assertTrue(result[2].freq, 'D')
+ self.assertEqual(result.name, expected.name)
+
+ result_list = idx.tolist()
+ for i in [0, 1, 3]:
+ self.assertTrue(result_list[i], expected_list[i])
+ self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
+ self.assertTrue(result_list[2].freq, 'D')
+
+ def test_minmax(self):
+
+ # monotonic
+ idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
+ '2011-01-03'], freq='D')
+ self.assertTrue(idx1.is_monotonic)
+
+ # non-monotonic
+ idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
+ '2011-01-02', pd.NaT], freq='D')
+ self.assertFalse(idx2.is_monotonic)
+
+ for idx in [idx1, idx2]:
+ self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
+ self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
+
+ for op in ['min', 'max']:
+ # Return NaT
+ obj = PeriodIndex([], freq='M')
+ result = getattr(obj, op)()
+ self.assertEqual(result.ordinal, tslib.iNaT)
+ self.assertEqual(result.freq, 'M')
+
+ obj = PeriodIndex([pd.NaT], freq='M')
+ result = getattr(obj, op)()
+ self.assertEqual(result.ordinal, tslib.iNaT)
+ self.assertEqual(result.freq, 'M')
+
+ obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
+ result = getattr(obj, op)()
+ self.assertEqual(result.ordinal, tslib.iNaT)
+ self.assertEqual(result.freq, 'M')
+
+ def test_representation(self):
+ # GH 7601
+ idx1 = PeriodIndex([], freq='D')
+ idx2 = PeriodIndex(['2011-01-01'], freq='D')
+ idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
+ idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
+ idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
+
+ idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
+ idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
+ idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
+
+ exp1 = """<class 'pandas.tseries.period.PeriodIndex'>
+Length: 0, Freq: D"""
+ exp2 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2011-01-01]
+Length: 1, Freq: D"""
+ exp3 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2011-01-01, 2011-01-02]
+Length: 2, Freq: D"""
+ exp4 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2011-01-01, ..., 2011-01-03]
+Length: 3, Freq: D"""
+ exp5 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2011, ..., 2013]
+Length: 3, Freq: A-DEC"""
+ exp6 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2011-01-01 09:00, ..., NaT]
+Length: 3, Freq: H"""
+ exp7 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2013Q1]
+Length: 1, Freq: Q-DEC"""
+ exp8 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2013Q1, 2013Q2]
+Length: 2, Freq: Q-DEC"""
+ exp9 = """<class 'pandas.tseries.period.PeriodIndex'>
+[2013Q1, ..., 2013Q3]
+Length: 3, Freq: Q-DEC"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
+ [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
+ for func in ['__repr__', '__unicode__', '__str__']:
+ result = getattr(idx, func)()
+ self.assertEqual(result, expected)
+
+ def test_resolution(self):
+ for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
+ ['day', 'day', 'day', 'day',
+ 'hour', 'minute', 'second', 'millisecond', 'microsecond']):
+
+ idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
+ self.assertEqual(idx.resolution, expected)
+
+ def test_add_iadd(self):
+ # union
+ rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
+ other1 = pd.period_range('1/6/2000', freq='D', periods=5)
+ expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
+
+ rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
+ other2 = pd.period_range('1/4/2000', freq='D', periods=5)
+ expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
+
+ rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
+ other3 = pd.PeriodIndex([], freq='D')
+ expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
+
+ rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
+ other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
+ expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
+ '2000-01-01 11:00', '2000-01-01 12:00',
+ '2000-01-01 13:00', '2000-01-02 09:00',
+ '2000-01-02 10:00', '2000-01-02 11:00',
+ '2000-01-02 12:00', '2000-01-02 13:00'],
+ freq='H')
+
+ rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
+ '2000-01-01 09:05'], freq='T')
+ other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'
+ '2000-01-01 09:08'], freq='T')
+ expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
+ '2000-01-01 09:05', '2000-01-01 09:08'],
+ freq='T')
+
+ rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
+ other6 = pd.period_range('2000-04-01', freq='M', periods=7)
+ expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
+
+ rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
+ other7 = pd.period_range('1998-01-01', freq='A', periods=8)
+ expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
+
+ for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
+ (rng3, other3, expected3), (rng4, other4, expected4),
+ (rng5, other5, expected5), (rng6, other6, expected6),
+ (rng7, other7, expected7)]:
+
+ result_add = rng + other
+ result_union = rng.union(other)
+
+ tm.assert_index_equal(result_add, expected)
+ tm.assert_index_equal(result_union, expected)
+ # GH 6527
+ rng += other
+ tm.assert_index_equal(rng, expected)
+
+ # offset
+ # DateOffset
+ rng = pd.period_range('2014', '2024', freq='A')
+ result = rng + pd.offsets.YearEnd(5)
+ expected = pd.period_range('2019', '2029', freq='A')
+ tm.assert_index_equal(result, expected)
+ rng += pd.offsets.YearEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng + o
+
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ result = rng + pd.offsets.MonthEnd(5)
+ expected = pd.period_range('2014-06', '2017-05', freq='M')
+ tm.assert_index_equal(result, expected)
+ rng += pd.offsets.MonthEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng + o
+
+ # Tick
+ offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
+ pd.offsets.Hour(72), timedelta(minutes=60*24*3),
+ np.timedelta64(72, 'h'), Timedelta('72:00:00')]
+ for delta in offsets:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ result = rng + delta
+ expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
+ tm.assert_index_equal(result, expected)
+ rng += delta
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng + o
+
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ pd.offsets.Minute(120), timedelta(minutes=120),
+ np.timedelta64(120, 'm'), Timedelta(minutes=120)]
+ for delta in offsets:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ result = rng + delta
+ expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
+ tm.assert_index_equal(result, expected)
+ rng += delta
+ tm.assert_index_equal(rng, expected)
+
+ for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
+ np.timedelta64(30, 's'), Timedelta(seconds=30)]:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ result = rng + delta
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng += delta
+
+ # int
+ rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
+ result = rng + 1
+ expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
+ tm.assert_index_equal(result, expected)
+ rng += 1
+ tm.assert_index_equal(rng, expected)
+
+ def test_sub_isub(self):
+ # diff
+ rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
+ other1 = pd.period_range('1/6/2000', freq='D', periods=5)
+ expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
+
+ rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
+ other2 = pd.period_range('1/4/2000', freq='D', periods=5)
+ expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
+
+ rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
+ other3 = pd.PeriodIndex([], freq='D')
+ expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
+
+ rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
+ other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
+ expected4 = rng4
+
+ rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
+ '2000-01-01 09:05'], freq='T')
+ other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
+ expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
+
+ rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
+ other6 = pd.period_range('2000-04-01', freq='M', periods=7)
+ expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
+
+ rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
+ other7 = pd.period_range('1998-01-01', freq='A', periods=8)
+ expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
+
+ for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
+ (rng3, other3, expected3), (rng4, other4, expected4),
+ (rng5, other5, expected5), (rng6, other6, expected6),
+ (rng7, other7, expected7),]:
+ result_add = rng - other
+ result_union = rng.diff(other)
+
+ tm.assert_index_equal(result_add, expected)
+ tm.assert_index_equal(result_union, expected)
+ rng -= other
+ tm.assert_index_equal(rng, expected)
+
+ # offset
+ # DateOffset
+ rng = pd.period_range('2014', '2024', freq='A')
+ result = rng - pd.offsets.YearEnd(5)
+ expected = pd.period_range('2009', '2019', freq='A')
+ tm.assert_index_equal(result, expected)
+ rng -= pd.offsets.YearEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ rng = pd.period_range('2014', '2024', freq='A')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng - o
+
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ result = rng - pd.offsets.MonthEnd(5)
+ expected = pd.period_range('2013-08', '2016-07', freq='M')
+ tm.assert_index_equal(result, expected)
+ rng -= pd.offsets.MonthEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng - o
+
+ # Tick
+ offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
+ pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
+ for delta in offsets:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ result = rng - delta
+ expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
+ tm.assert_index_equal(result, expected)
+ rng -= delta
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng - o
+
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
+ for delta in offsets:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ result = rng - delta
+ expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
+ tm.assert_index_equal(result, expected)
+ rng -= delta
+ tm.assert_index_equal(rng, expected)
+
+ for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ result = rng + delta
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng += delta
+
+ # int
+ rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
+ result = rng - 1
+ expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
+ tm.assert_index_equal(result, expected)
+ rng -= 1
+ tm.assert_index_equal(rng, expected)
+
+ def test_value_counts_unique(self):
+ # GH 7735
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
+ # create repeated values, 'n'th element is repeated by n+1 times
+ idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H')
+
+ exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00',
+ '2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00',
+ '2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00',
+ '2011-01-01 09:00'], freq='H')
+ expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
+ tm.assert_series_equal(idx.value_counts(), expected)
+
+ expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
+ tm.assert_index_equal(idx.unique(), expected)
+
+ idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
+ '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H')
+
+ exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
+ expected = Series([3, 2], index=exp_idx)
+ tm.assert_series_equal(idx.value_counts(), expected)
+
+ exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H')
+ expected = Series([3, 2, 1], index=exp_idx)
+ tm.assert_series_equal(idx.value_counts(dropna=False), expected)
+
+ tm.assert_index_equal(idx.unique(), exp_idx)
+
+
+if __name__ == '__main__':
+ import nose
+
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ # '--with-coverage', '--cover-package=pandas.core'],
+ exit=False)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 769062f293cf9..48d3f3a551055 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -1,22 +1,29 @@
# pylint: disable-msg=E1101,W0612
+from __future__ import division
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
-from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
- bdate_range, date_range)
+from pandas import (Index, Series, DataFrame, Timestamp, Timedelta, TimedeltaIndex, isnull, notnull,
+ bdate_range, date_range, timedelta_range, Int64Index)
import pandas.core.common as com
-from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
+from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long, PY3_2
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
+ assert_index_equal,
ensure_clean)
+from pandas.tseries.offsets import Day, Second, Hour
import pandas.util.testing as tm
+from numpy.random import rand, randn
+from pandas import _np_version_under1p8
+
+iNaT = tslib.iNaT
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
@@ -24,6 +31,240 @@ class TestTimedeltas(tm.TestCase):
def setUp(self):
pass
+ def test_construction(self):
+
+ expected = np.timedelta64(10,'D').astype('m8[ns]').view('i8')
+ self.assertEqual(Timedelta(10,unit='d').value, expected)
+ self.assertEqual(Timedelta(10.0,unit='d').value, expected)
+ self.assertEqual(Timedelta('10 days').value, expected)
+ self.assertEqual(Timedelta(days=10).value, expected)
+
+ expected += np.timedelta64(10,'s').astype('m8[ns]').view('i8')
+ self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
+ self.assertEqual(Timedelta(days=10,seconds=10).value, expected)
+ self.assertEqual(Timedelta(days=10,milliseconds=10*1000).value, expected)
+ self.assertEqual(Timedelta(days=10,microseconds=10*1000*1000).value, expected)
+
+ # rounding cases
+ self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
+ self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
+ self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
+ self.assertTrue('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
+
+ # more strings
+ # GH 8190
+ self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
+ self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
+ self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
+ self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
+ self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
+ self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
+ self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
+ self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
+ self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
+ self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
+ self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
+ self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
+ self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
+ self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
+ self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
+ self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
+ self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
+ self.assertEqual(Timedelta('1.5 microsecond'), Timedelta('00:00:00.000001500'))
+ self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
+ self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
+ self.assertEqual(Timedelta('1 nanosecond'), Timedelta('00:00:00.000000001'))
+
+ # combos
+ self.assertEqual(Timedelta('10 days 1 hour'), timedelta(days=10,hours=1))
+ self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10,hours=1))
+ self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(days=10,hours=1,minutes=1,seconds=1))
+ self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -timedelta(days=10,hours=1,minutes=1,seconds=1))
+ self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -timedelta(days=10,hours=1,minutes=1,seconds=1))
+ self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -timedelta(days=10,hours=1,minutes=1,seconds=1,microseconds=3))
+ self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -timedelta(days=10,hours=1,minutes=1,seconds=31,microseconds=3))
+
+ # currently invalid as it has a - on the hhmmdd part (only allowed on the days)
+ self.assertRaises(ValueError, lambda : Timedelta('-10 days -1 h 1.5m 1s 3us'))
+
+ # roundtripping both for string and value
+ for v in ['1s',
+ '-1s',
+ '1us',
+ '-1us',
+ '1 day',
+ '-1 day',
+ '-23:59:59.999999',
+ '-1 days +23:59:59.999999',
+ '-1ns',
+ '1ns',
+ '-23:59:59.999999999']:
+
+ td = Timedelta(v)
+ self.assertEqual(Timedelta(td.value),td)
+
+ # str does not normally display nanos
+ if not td.nanoseconds:
+ self.assertEqual(Timedelta(str(td)),td)
+ self.assertEqual(Timedelta(td._repr_base(format='all')),td)
+
+ # floats
+ expected = np.timedelta64(10,'s').astype('m8[ns]').view('i8') + np.timedelta64(500,'ms').astype('m8[ns]').view('i8')
+ self.assertEqual(Timedelta(10.5,unit='s').value, expected)
+
+ # nat
+ self.assertEqual(Timedelta('').value,iNaT)
+ self.assertEqual(Timedelta('nat').value,iNaT)
+ self.assertEqual(Timedelta('NAT').value,iNaT)
+ self.assertTrue(isnull(Timestamp('nat')))
+ self.assertTrue(isnull(Timedelta('nat')))
+
+ # offset
+ self.assertEqual(to_timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
+ self.assertEqual(Timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
+ self.assertEqual(Timedelta(pd.offsets.Second(2)),Timedelta('0 days, 00:00:02'))
+
+ # invalid
+ tm.assertRaisesRegexp(ValueError,
+ "cannot construct a TimeDelta",
+ lambda : Timedelta())
+ tm.assertRaisesRegexp(ValueError,
+ "cannot create timedelta string convert",
+ lambda : Timedelta('foo'))
+ tm.assertRaisesRegexp(ValueError,
+ "cannot construct a TimeDelta from the passed arguments, allowed keywords are ",
+ lambda : Timedelta(day=10))
+
+ def test_repr(self):
+
+ self.assertEqual(repr(Timedelta(10,unit='d')),"Timedelta('10 days 00:00:00')")
+ self.assertEqual(repr(Timedelta(10,unit='s')),"Timedelta('0 days 00:00:10')")
+ self.assertEqual(repr(Timedelta(10,unit='ms')),"Timedelta('0 days 00:00:00.010000')")
+ self.assertEqual(repr(Timedelta(-10,unit='ms')),"Timedelta('-1 days +23:59:59.990000')")
+
+ def test_identity(self):
+
+ td = Timedelta(10,unit='d')
+ self.assertTrue(isinstance(td, Timedelta))
+ self.assertTrue(isinstance(td, timedelta))
+
+ def test_conversion(self):
+
+ for td in [ Timedelta(10,unit='d'), Timedelta('1 days, 10:11:12.012345') ]:
+ self.assertTrue(td == Timedelta(td.to_pytimedelta()))
+ self.assertEqual(td,td.to_pytimedelta())
+ self.assertEqual(td,np.timedelta64(td.value,'ns'))
+
+ # this is NOT equal and cannot be roundtriped (because of the nanos)
+ td = Timedelta('1 days, 10:11:12.012345678')
+ self.assertTrue(td != td.to_pytimedelta())
+
+ def test_ops(self):
+
+ td = Timedelta(10,unit='d')
+ self.assertEqual(-td,Timedelta(-10,unit='d'))
+ self.assertEqual(+td,Timedelta(10,unit='d'))
+ self.assertEqual(td - td, Timedelta(0,unit='ns'))
+ self.assertTrue((td - pd.NaT) is pd.NaT)
+ self.assertEqual(td + td, Timedelta(20,unit='d'))
+ self.assertTrue((td + pd.NaT) is pd.NaT)
+ self.assertEqual(td * 2, Timedelta(20,unit='d'))
+ self.assertTrue((td * pd.NaT) is pd.NaT)
+ self.assertEqual(td / 2, Timedelta(5,unit='d'))
+ self.assertEqual(abs(td), td)
+ self.assertEqual(abs(-td), td)
+ self.assertEqual(td / td, 1)
+ self.assertTrue((td / pd.NaT) is pd.NaT)
+
+ # invert
+ self.assertEqual(-td,Timedelta('-10d'))
+ self.assertEqual(td * -1,Timedelta('-10d'))
+ self.assertEqual(-1 * td,Timedelta('-10d'))
+ self.assertEqual(abs(-td),Timedelta('10d'))
+
+ # invalid
+ self.assertRaises(TypeError, lambda : Timedelta(11,unit='d') // 2)
+
+ # invalid multiply with another timedelta
+ self.assertRaises(TypeError, lambda : td * td)
+
+ # can't operate with integers
+ self.assertRaises(TypeError, lambda : td + 2)
+ self.assertRaises(TypeError, lambda : td - 2)
+
+ def test_freq_conversion(self):
+
+ td = Timedelta('1 days 2 hours 3 ns')
+ result = td / np.timedelta64(1,'D')
+ self.assertEquals(result, td.value/float(86400*1e9))
+ result = td / np.timedelta64(1,'s')
+ self.assertEquals(result, td.value/float(1e9))
+ result = td / np.timedelta64(1,'ns')
+ self.assertEquals(result, td.value)
+
+ def test_fields(self):
+ rng = to_timedelta('1 days, 10:11:12')
+ self.assertEqual(rng.days,1)
+ self.assertEqual(rng.hours,10)
+ self.assertEqual(rng.minutes,11)
+ self.assertEqual(rng.seconds,12)
+ self.assertEqual(rng.milliseconds,0)
+ self.assertEqual(rng.microseconds,0)
+ self.assertEqual(rng.nanoseconds,0)
+
+ td = Timedelta('-1 days, 10:11:12')
+ self.assertEqual(abs(td),Timedelta('13:48:48'))
+ self.assertTrue(str(td) == "-1 days +10:11:12")
+ self.assertEqual(-td,Timedelta('0 days 13:48:48'))
+ self.assertEqual(-Timedelta('-1 days, 10:11:12').value,49728000000000)
+ self.assertEqual(Timedelta('-1 days, 10:11:12').value,-49728000000000)
+
+ rng = to_timedelta('-1 days, 10:11:12')
+ self.assertEqual(rng.days,-1)
+ self.assertEqual(rng.hours,10)
+ self.assertEqual(rng.minutes,11)
+ self.assertEqual(rng.seconds,12)
+ self.assertEqual(rng.milliseconds,0)
+ self.assertEqual(rng.microseconds,0)
+ self.assertEqual(rng.nanoseconds,0)
+
+ # components
+ tup = pd.to_timedelta(-1, 'us').components
+ self.assertEqual(tup.days,-1)
+ self.assertEqual(tup.hours,23)
+ self.assertEqual(tup.minutes,59)
+ self.assertEqual(tup.seconds,59)
+ self.assertEqual(tup.milliseconds,999)
+ self.assertEqual(tup.microseconds,999)
+ self.assertEqual(tup.nanoseconds,0)
+
+ tup = Timedelta('-1 days 1 us').components
+ self.assertEqual(tup.days,-2)
+ self.assertEqual(tup.hours,23)
+ self.assertEqual(tup.minutes,59)
+ self.assertEqual(tup.seconds,59)
+ self.assertEqual(tup.milliseconds,999)
+ self.assertEqual(tup.microseconds,999)
+ self.assertEqual(tup.nanoseconds,0)
+
+ def test_timedelta_range(self):
+
+ expected = to_timedelta(np.arange(5),unit='D')
+ result = timedelta_range('0 days',periods=5,freq='D')
+ tm.assert_index_equal(result, expected)
+
+ expected = to_timedelta(np.arange(11),unit='D')
+ result = timedelta_range('0 days','10 days',freq='D')
+ tm.assert_index_equal(result, expected)
+
+ expected = to_timedelta(np.arange(5),unit='D') + Second(2) + Day()
+ result = timedelta_range('1 days, 00:00:02','5 days, 00:00:02',freq='D')
+ tm.assert_index_equal(result, expected)
+
+ expected = to_timedelta([1,3,5,7,9],unit='D') + Second(2)
+ result = timedelta_range('1 days, 00:00:02',periods=5,freq='2D')
+ tm.assert_index_equal(result, expected)
+
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
@@ -99,7 +340,7 @@ def conv(v):
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
- self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s')))
+ self.assertEqual(ct('- 1days, 00:00:01'), conv(-d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
@@ -141,9 +382,9 @@ def conv(v):
tm.assert_series_equal(result, expected)
# with units
- result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]')
+ result = TimedeltaIndex([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ])
expected = to_timedelta([0,10],unit='s')
- tm.assert_series_equal(result, expected)
+ tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
@@ -159,40 +400,40 @@ def conv(v):
# arrays of various dtypes
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='s')
- expected = Series([ np.timedelta64(1,'s') ]*5)
- tm.assert_series_equal(result, expected)
+ expected = TimedeltaIndex([ np.timedelta64(1,'s') ]*5)
+ tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='m')
- expected = Series([ np.timedelta64(1,'m') ]*5)
- tm.assert_series_equal(result, expected)
+ expected = TimedeltaIndex([ np.timedelta64(1,'m') ]*5)
+ tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='h')
- expected = Series([ np.timedelta64(1,'h') ]*5)
- tm.assert_series_equal(result, expected)
+ expected = TimedeltaIndex([ np.timedelta64(1,'h') ]*5)
+ tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='timedelta64[s]')
result = to_timedelta(arr)
- expected = Series([ np.timedelta64(1,'s') ]*5)
- tm.assert_series_equal(result, expected)
+ expected = TimedeltaIndex([ np.timedelta64(1,'s') ]*5)
+ tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='timedelta64[D]')
result = to_timedelta(arr)
- expected = Series([ np.timedelta64(1,'D') ]*5)
- tm.assert_series_equal(result, expected)
+ expected = TimedeltaIndex([ np.timedelta64(1,'D') ]*5)
+ tm.assert_index_equal(result, expected)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5),unit=unit)
- expected = Series([ np.timedelta64(i,transform(unit)) for i in np.arange(5).tolist() ])
- tm.assert_series_equal(result, expected)
+ expected = TimedeltaIndex([ np.timedelta64(i,transform(unit)) for i in np.arange(5).tolist() ])
+ tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2,unit=unit)
- expected = np.timedelta64(2,transform(unit)).astype('timedelta64[ns]')
- self.assert_numpy_array_equal(result,expected)
+ expected = Timedelta(np.timedelta64(2,transform(unit)).astype('timedelta64[ns]'))
+ self.assertEqual(result, expected)
# validate all units
# GH 6855
@@ -212,8 +453,6 @@ def testit(unit, transform):
testit('L',lambda x: 'ms')
# these will error
- self.assertRaises(ValueError, lambda : to_timedelta(['1h']))
- self.assertRaises(ValueError, lambda : to_timedelta(['1m']))
self.assertRaises(ValueError, lambda : to_timedelta([1,2],unit='foo'))
self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo'))
@@ -228,30 +467,28 @@ def test_to_timedelta_via_apply(self):
def test_timedelta_ops(self):
# GH4984
- # make sure ops return timedeltas
+ # make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
- result = td.mean()[0]
- # TODO This should have returned a scalar to begin with. Hack for now.
+ result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
- tm.assert_almost_equal(result, expected)
+ self.assertEqual(result, expected)
result = td.quantile(.1)
- # This properly returned a scalar.
- expected = np.timedelta64(2599999999,'ns')
- tm.assert_almost_equal(result, expected)
+ expected = Timedelta(np.timedelta64(2600,'ms'))
+ self.assertEqual(result, expected)
- result = td.median()[0]
- # TODO This should have returned a scalar to begin with. Hack for now.
+ result = td.median()
expected = to_timedelta('00:00:08')
- tm.assert_almost_equal(result, expected)
+ self.assertEqual(result, expected)
# GH 6462
# consistency in returned values for sum
- result = td.sum()[0]
+ result = td.sum()
expected = to_timedelta('00:01:21')
tm.assert_almost_equal(result, expected)
+ self.assertEqual(result, expected)
def test_timedelta_ops_scalar(self):
# GH 6808
@@ -297,10 +534,10 @@ def test_to_timedelta_on_missing_values(self):
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
- self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64'))
+ self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
actual = pd.to_timedelta(pd.NaT)
- self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64'))
+ self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
def test_timedelta_ops_with_missing_values(self):
# setup
@@ -394,6 +631,567 @@ def test_apply_to_timedelta(self):
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
+ def test_pickle(self):
+
+ v = Timedelta('1 days 10:11:12.0123456')
+ v_p = self.round_trip_pickle(v)
+ self.assertEqual(v,v_p)
+
+class TestTimedeltaIndex(tm.TestCase):
+ _multiprocess_can_split_ = True
+
+ def test_pass_TimedeltaIndex_to_index(self):
+
+ rng = timedelta_range('1 days','10 days')
+ idx = Index(rng, dtype=object)
+
+ expected = Index(rng.to_pytimedelta(), dtype=object)
+
+ self.assert_numpy_array_equal(idx.values, expected.values)
+
+ def test_pickle(self):
+
+ rng = timedelta_range('1 days', periods=10)
+ rng_p = self.round_trip_pickle(rng)
+ tm.assert_index_equal(rng,rng_p)
+
+ def test_hash_error(self):
+ index = timedelta_range('1 days', periods=10)
+ with tm.assertRaisesRegexp(TypeError,
+ "unhashable type: %r" %
+ type(index).__name__):
+ hash(index)
+
+ def test_append_join_nondatetimeindex(self):
+ rng = timedelta_range('1 days', periods=10)
+ idx = Index(['a', 'b', 'c', 'd'])
+
+ result = rng.append(idx)
+ tm.assert_isinstance(result[0], Timedelta)
+
+ # it works
+ rng.join(idx, how='outer')
+
+ def test_append_numpy_bug_1681(self):
+
+ td = timedelta_range('1 days','10 days',freq='2D')
+ a = DataFrame()
+ c = DataFrame({'A': 'foo', 'B': td}, index=td)
+ str(c)
+
+ result = a.append(c)
+ self.assertTrue((result['B'] == td).all())
+
+ def test_astype(self):
+ rng = timedelta_range('1 days', periods=10)
+
+ result = rng.astype('i8')
+ self.assert_numpy_array_equal(result, rng.asi8)
+
+ def test_fields(self):
+ rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
+ self.assert_numpy_array_equal(rng.days, np.array([1,1],dtype='int64'))
+ self.assert_numpy_array_equal(rng.hours, np.array([10,10],dtype='int64'))
+ self.assert_numpy_array_equal(rng.minutes, np.array([11,11],dtype='int64'))
+ self.assert_numpy_array_equal(rng.seconds, np.array([12,13],dtype='int64'))
+ self.assert_numpy_array_equal(rng.milliseconds, np.array([0,0],dtype='int64'))
+ self.assert_numpy_array_equal(rng.microseconds, np.array([0,0],dtype='int64'))
+ self.assert_numpy_array_equal(rng.nanoseconds, np.array([0,0],dtype='int64'))
+
+ # with nat
+ s = Series(rng)
+ s[1] = np.nan
+
+ tm.assert_series_equal(s.dt.days,Series([1,np.nan],index=[0,1]))
+ tm.assert_series_equal(s.dt.hours,Series([10,np.nan],index=[0,1]))
+ tm.assert_series_equal(s.dt.milliseconds,Series([0,np.nan],index=[0,1]))
+
+ def test_components(self):
+ rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
+ rng.components
+
+ # with nat
+ s = Series(rng)
+ s[1] = np.nan
+
+ result = s.dt.components
+ self.assertFalse(result.iloc[0].isnull().all())
+ self.assertTrue(result.iloc[1].isnull().all())
+
+ def test_constructor(self):
+ expected = TimedeltaIndex(['1 days','1 days 00:00:05',
+ '2 days','2 days 00:00:02','0 days 00:00:03'])
+ result = TimedeltaIndex(['1 days','1 days, 00:00:05',
+ np.timedelta64(2,'D'),
+ timedelta(days=2,seconds=2),
+ pd.offsets.Second(3)])
+ tm.assert_index_equal(result,expected)
+
+ def test_constructor_coverage(self):
+ rng = timedelta_range('1 days', periods=10.5)
+ exp = timedelta_range('1 days', periods=10)
+ self.assertTrue(rng.equals(exp))
+
+ self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
+ periods='foo', freq='D')
+
+ self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
+ end='10 days')
+
+ self.assertRaises(ValueError, TimedeltaIndex, '1 days')
+
+ # generator expression
+ gen = (timedelta(i) for i in range(10))
+ result = TimedeltaIndex(gen)
+ expected = TimedeltaIndex([timedelta(i) for i in range(10)])
+ self.assertTrue(result.equals(expected))
+
+ # NumPy string array
+ strings = np.array(['1 days', '2 days', '3 days'])
+ result = TimedeltaIndex(strings)
+ expected = to_timedelta([1,2,3],unit='d')
+ self.assertTrue(result.equals(expected))
+
+ from_ints = TimedeltaIndex(expected.asi8)
+ self.assertTrue(from_ints.equals(expected))
+
+ # non-conforming freq
+ self.assertRaises(ValueError, TimedeltaIndex,
+ ['1 days', '2 days', '4 days'],
+ freq='D')
+
+ self.assertRaises(ValueError, TimedeltaIndex, periods=10, freq='D')
+
+ def test_constructor_name(self):
+ idx = TimedeltaIndex(start='1 days', periods=1, freq='D',
+ name='TEST')
+ self.assertEqual(idx.name, 'TEST')
+
+ def test_freq_conversion(self):
+
+ # doc example
+
+ # series
+ td = Series(date_range('20130101',periods=4)) - \
+ Series(date_range('20121201',periods=4))
+ td[2] += timedelta(minutes=5,seconds=3)
+ td[3] = np.nan
+
+ result = td / np.timedelta64(1,'D')
+ expected = Series([31,31,(31*86400+5*60+3)/86400.0,np.nan])
+ assert_series_equal(result,expected)
+
+ result = td.astype('timedelta64[D]')
+ expected = Series([31,31,31,np.nan])
+ assert_series_equal(result,expected)
+
+ result = td / np.timedelta64(1,'s')
+ expected = Series([31*86400,31*86400,31*86400+5*60+3,np.nan])
+ assert_series_equal(result,expected)
+
+ result = td.astype('timedelta64[s]')
+ assert_series_equal(result,expected)
+
+ # tdi
+ td = TimedeltaIndex(td)
+
+ result = td / np.timedelta64(1,'D')
+ expected = Index([31,31,(31*86400+5*60+3)/86400.0,np.nan])
+ assert_index_equal(result,expected)
+
+ result = td.astype('timedelta64[D]')
+ expected = Index([31,31,31,np.nan])
+ assert_index_equal(result,expected)
+
+ result = td / np.timedelta64(1,'s')
+ expected = Index([31*86400,31*86400,31*86400+5*60+3,np.nan])
+ assert_index_equal(result,expected)
+
+ result = td.astype('timedelta64[s]')
+ assert_index_equal(result,expected)
+
+ def test_comparisons_coverage(self):
+ rng = timedelta_range('1 days', periods=10)
+
+ result = rng < rng[3]
+ exp = np.array([True, True, True]+[False]*7)
+ self.assert_numpy_array_equal(result, exp)
+
+ # raise TypeError for now
+ self.assertRaises(TypeError, rng.__lt__, rng[3].value)
+
+ result = rng == list(rng)
+ exp = rng == rng
+ self.assert_numpy_array_equal(result, exp)
+
+ def test_comparisons_nat(self):
+ if PY3_2:
+ raise nose.SkipTest('nat comparisons on 3.2 broken')
+
+ tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
+ '1 day 00:00:01', '5 day 00:00:03'])
+ tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
+ '1 day 00:00:02', '5 days 00:00:03'])
+ tdarr = np.array([np.timedelta64(2,'D'),
+ np.timedelta64(2,'D'),
+ np.timedelta64('nat'), np.timedelta64('nat'),
+ np.timedelta64(1,'D') + np.timedelta64(2,'s'),
+ np.timedelta64(5,'D') + np.timedelta64(3,'s')])
+
+ if _np_version_under1p8:
+ # cannot test array because np.datetime('nat') returns today's date
+ cases = [(tdidx1, tdidx2)]
+ else:
+ cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
+
+ # Check pd.NaT is handles as the same as np.nan
+ for idx1, idx2 in cases:
+
+ result = idx1 < idx2
+ expected = np.array([True, False, False, False, True, False])
+ self.assert_numpy_array_equal(result, expected)
+
+ result = idx2 > idx1
+ expected = np.array([True, False, False, False, True, False])
+ self.assert_numpy_array_equal(result, expected)
+
+ result = idx1 <= idx2
+ expected = np.array([True, False, False, False, True, True])
+ self.assert_numpy_array_equal(result, expected)
+
+ result = idx2 >= idx1
+ expected = np.array([True, False, False, False, True, True])
+ self.assert_numpy_array_equal(result, expected)
+
+ result = idx1 == idx2
+ expected = np.array([False, False, False, False, False, True])
+ self.assert_numpy_array_equal(result, expected)
+
+ result = idx1 != idx2
+ expected = np.array([True, True, True, True, True, False])
+ self.assert_numpy_array_equal(result, expected)
+
+ def test_map(self):
+
+ rng = timedelta_range('1 day', periods=10)
+
+ f = lambda x: x.days
+ result = rng.map(f)
+ exp = [f(x) for x in rng]
+ self.assert_numpy_array_equal(result, exp)
+
+ def test_misc_coverage(self):
+
+ rng = timedelta_range('1 day', periods=5)
+ result = rng.groupby(rng.days)
+ tm.assert_isinstance(list(result.values())[0][0], Timedelta)
+
+ idx = TimedeltaIndex(['3d','1d','2d'])
+ self.assertTrue(idx.equals(list(idx)))
+
+ non_td = Index(list('abc'))
+ self.assertFalse(idx.equals(list(non_td)))
+
+ def test_union(self):
+
+ i1 = timedelta_range('1day',periods=5)
+ i2 = timedelta_range('3day',periods=5)
+ result = i1.union(i2)
+ expected = timedelta_range('1day',periods=7)
+ self.assert_numpy_array_equal(result, expected)
+
+ i1 = Int64Index(np.arange(0, 20, 2))
+ i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
+ i1.union(i2) # Works
+ i2.union(i1) # Fails with "AttributeError: can't set attribute"
+
+ def test_union_coverage(self):
+
+ idx = TimedeltaIndex(['3d','1d','2d'])
+ ordered = TimedeltaIndex(idx.order(), freq='infer')
+ result = ordered.union(idx)
+ self.assertTrue(result.equals(ordered))
+
+ result = ordered[:0].union(ordered)
+ self.assertTrue(result.equals(ordered))
+ self.assertEqual(result.freq, ordered.freq)
+
+ def test_union_bug_1730(self):
+
+ rng_a = timedelta_range('1 day', periods=4, freq='3H')
+ rng_b = timedelta_range('1 day', periods=4, freq='4H')
+
+ result = rng_a.union(rng_b)
+ exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
+ self.assertTrue(result.equals(exp))
+
+ def test_union_bug_1745(self):
+
+ left = TimedeltaIndex(['1 day 15:19:49.695000'])
+ right = TimedeltaIndex(['2 day 13:04:21.322000',
+ '1 day 15:27:24.873000',
+ '1 day 15:31:05.350000'])
+
+ result = left.union(right)
+ exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
+ self.assertTrue(result.equals(exp))
+
+ def test_union_bug_4564(self):
+
+ left = timedelta_range("1 day","30d")
+ right = left + pd.offsets.Minute(15)
+
+ result = left.union(right)
+ exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
+ self.assertTrue(result.equals(exp))
+
+ def test_intersection_bug_1708(self):
+ index_1 = timedelta_range('1 day', periods=4, freq='h')
+ index_2 = index_1 + pd.offsets.Hour(5)
+
+ result = index_1 & index_2
+ self.assertEqual(len(result), 0)
+
+ index_1 = timedelta_range('1 day', periods=4, freq='h')
+ index_2 = index_1 + pd.offsets.Hour(1)
+
+ result = index_1 & index_2
+ expected = timedelta_range('1 day 01:00:00',periods=3,freq='h')
+ tm.assert_index_equal(result,expected)
+
+ def test_get_duplicates(self):
+ idx = TimedeltaIndex(['1 day','2 day','2 day','3 day','3day', '4day'])
+
+ result = idx.get_duplicates()
+ ex = TimedeltaIndex(['2 day','3day'])
+ self.assertTrue(result.equals(ex))
+
+ def test_argmin_argmax(self):
+ idx = TimedeltaIndex(['1 day 00:00:05','1 day 00:00:01','1 day 00:00:02'])
+ self.assertEqual(idx.argmin(), 1)
+ self.assertEqual(idx.argmax(), 0)
+
+ def test_order(self):
+
+ idx = TimedeltaIndex(['4d','1d','2d'])
+
+ ordered = idx.order()
+ self.assertTrue(ordered.is_monotonic)
+
+ ordered = idx.order(ascending=False)
+ self.assertTrue(ordered[::-1].is_monotonic)
+
+ ordered, dexer = idx.order(return_indexer=True)
+ self.assertTrue(ordered.is_monotonic)
+ self.assert_numpy_array_equal(dexer, [1, 2, 0])
+
+ ordered, dexer = idx.order(return_indexer=True, ascending=False)
+ self.assertTrue(ordered[::-1].is_monotonic)
+ self.assert_numpy_array_equal(dexer, [0, 2, 1])
+
+ def test_insert(self):
+
+ idx = TimedeltaIndex(['4day','1day','2day'], name='idx')
+
+ result = idx.insert(2, timedelta(days=5))
+ exp = TimedeltaIndex(['4day','1day','5day','2day'],name='idx')
+ self.assertTrue(result.equals(exp))
+
+ # insertion of non-datetime should coerce to object index
+ result = idx.insert(1, 'inserted')
+ expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
+ Timedelta('2day')], name='idx')
+ self.assertNotIsInstance(result, TimedeltaIndex)
+ tm.assert_index_equal(result, expected)
+ self.assertEqual(result.name, expected.name)
+
+ idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
+
+ # preserve freq
+ expected_0 = TimedeltaIndex(['1day','1day 00:00:01','1day 00:00:02','1day 00:00:03'],
+ name='idx', freq='s')
+ expected_3 = TimedeltaIndex(['1day 00:00:01','1day 00:00:02','1day 00:00:03','1day 00:00:04'],
+ name='idx', freq='s')
+
+ # reset freq to None
+ expected_1_nofreq = TimedeltaIndex(['1day 00:00:01','1day 00:00:01','1day 00:00:02','1day 00:00:03'],
+ name='idx', freq=None)
+ expected_3_nofreq = TimedeltaIndex(['1day 00:00:01','1day 00:00:02','1day 00:00:03','1day 00:00:05'],
+ name='idx', freq=None)
+
+ cases = [(0, Timedelta('1day'), expected_0),
+ (-3, Timedelta('1day'), expected_0),
+ (3, Timedelta('1day 00:00:04'), expected_3),
+ (1, Timedelta('1day 00:00:01'), expected_1_nofreq),
+ (3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
+
+ for n, d, expected in cases:
+ result = idx.insert(n, d)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(result.freq, expected.freq)
+
+ def test_delete(self):
+ idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
+
+ # prserve freq
+ expected_0 = timedelta_range(start='2 Days', periods=4, freq='D', name='idx')
+ expected_4 = timedelta_range(start='1 Days', periods=4, freq='D', name='idx')
+
+ # reset freq to None
+ expected_1 = TimedeltaIndex(['1 day','3 day','4 day', '5 day'],freq=None,name='idx')
+
+ cases ={0: expected_0, -5: expected_0,
+ -1: expected_4, 4: expected_4,
+ 1: expected_1}
+ for n, expected in compat.iteritems(cases):
+ result = idx.delete(n)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(result.freq, expected.freq)
+
+ with tm.assertRaises((IndexError, ValueError)):
+ # either depeidnig on numpy version
+ result = idx.delete(5)
+
+ def test_delete_slice(self):
+ idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
+
+ # prserve freq
+ expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D', name='idx')
+ expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D', name='idx')
+
+ # reset freq to None
+ expected_3_5 = TimedeltaIndex(['1 d','2 d','3 d',
+ '7 d','8 d','9 d','10d'], freq=None, name='idx')
+
+ cases ={(0, 1, 2): expected_0_2,
+ (7, 8, 9): expected_7_9,
+ (3, 4, 5): expected_3_5}
+ for n, expected in compat.iteritems(cases):
+ result = idx.delete(n)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.delete(slice(n[0], n[-1] + 1))
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+ self.assertEqual(result.freq, expected.freq)
+
+ def test_take(self):
+
+ tds = ['1day 02:00:00','1 day 04:00:00','1 day 10:00:00']
+ idx = TimedeltaIndex(start='1d',end='2d',freq='H',name='idx')
+ expected = TimedeltaIndex(tds, freq=None, name='idx')
+
+ taken1 = idx.take([2, 4, 10])
+ taken2 = idx[[2,4,10]]
+
+ for taken in [taken1, taken2]:
+ self.assertTrue(taken.equals(expected))
+ tm.assert_isinstance(taken, TimedeltaIndex)
+ self.assertIsNone(taken.freq)
+ self.assertEqual(taken.name, expected.name)
+
+ def test_isin(self):
+
+ index = tm.makeTimedeltaIndex(4)
+ result = index.isin(index)
+ self.assertTrue(result.all())
+
+ result = index.isin(list(index))
+ self.assertTrue(result.all())
+
+ assert_almost_equal(index.isin([index[2], 5]),
+ [False, False, True, False])
+
+ def test_does_not_convert_mixed_integer(self):
+ df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
+ randn(), r_idx_type='i', c_idx_type='td')
+ str(df)
+
+ cols = df.columns.join(df.index, how='outer')
+ joined = cols.join(df.columns)
+ self.assertEqual(cols.dtype, np.dtype('O'))
+ self.assertEqual(cols.dtype, joined.dtype)
+ tm.assert_index_equal(cols, joined)
+
+ def test_slice_keeps_name(self):
+
+ # GH4226
+ dr = pd.timedelta_range('1d','5d', freq='H', name='timebucket')
+ self.assertEqual(dr[1:].name, dr.name)
+
+ def test_join_self(self):
+
+ index = timedelta_range('1 day', periods=10)
+ kinds = 'outer', 'inner', 'left', 'right'
+ for kind in kinds:
+ joined = index.join(index, how=kind)
+ self.assertIs(index, joined)
+
+ def test_factorize(self):
+ idx1 = TimedeltaIndex(['1 day','1 day','2 day',
+ '2 day','3 day','3 day'])
+
+ exp_arr = np.array([0, 0, 1, 1, 2, 2])
+ exp_idx = TimedeltaIndex(['1 day','2 day','3 day'])
+
+ arr, idx = idx1.factorize()
+ self.assert_numpy_array_equal(arr, exp_arr)
+ self.assertTrue(idx.equals(exp_idx))
+
+ arr, idx = idx1.factorize(sort=True)
+ self.assert_numpy_array_equal(arr, exp_arr)
+ self.assertTrue(idx.equals(exp_idx))
+
+ # freq must be preserved
+ idx3 = timedelta_range('1 day', periods=4, freq='s')
+ exp_arr = np.array([0, 1, 2, 3])
+ arr, idx = idx3.factorize()
+ self.assert_numpy_array_equal(arr, exp_arr)
+ self.assertTrue(idx.equals(idx3))
+
+class TestSlicing(tm.TestCase):
+
+ def test_partial_slice(self):
+ rng = timedelta_range('1 day 10:11:12', freq='h',periods=500)
+ s = Series(np.arange(len(rng)), index=rng)
+
+ result = s['5 day':'6 day']
+ expected = s.iloc[86:134]
+ assert_series_equal(result, expected)
+
+ result = s['5 day':]
+ expected = s.iloc[86:]
+ assert_series_equal(result, expected)
+
+ result = s[:'6 day']
+ expected = s.iloc[:134]
+ assert_series_equal(result, expected)
+
+ result = s['6 days, 23:11:12']
+ self.assertEqual(result, s.irow(133))
+
+ self.assertRaises(KeyError, s.__getitem__, '50 days')
+
+ def test_partial_slice_high_reso(self):
+
+ # higher reso
+ rng = timedelta_range('1 day 10:11:12', freq='us',periods=2000)
+ s = Series(np.arange(len(rng)), index=rng)
+
+ result = s['1 day 10:11:12':]
+ expected = s.iloc[0:]
+ assert_series_equal(result, expected)
+
+ result = s['1 day 10:11:12.001':]
+ expected = s.iloc[1000:]
+ assert_series_equal(result, expected)
+
+ result = s['1 days, 10:11:12.001001']
+ self.assertEqual(result, s.irow(1001))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 828c2a554b02d..1980924483bfb 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -332,7 +332,6 @@ def test_dti_slicing(self):
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
-
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index e762ebe9d85cf..ad8c2c0f09ea1 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -12,19 +12,17 @@
is_timedelta64_dtype, _values_from_object,
is_list_like, isnull, _ensure_object)
-repr_timedelta = tslib.repr_timedelta64
-repr_timedelta64 = tslib.repr_timedelta64
-
-def to_timedelta(arg, box=True, unit='ns'):
+def to_timedelta(arg, unit='ns', box=True):
"""
Convert argument to timedelta
Parameters
----------
arg : string, timedelta, array of strings (with possible NAs)
- box : boolean, default True
- If True returns a Series of the results, if False returns ndarray of values
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an integer/float number
+ box : boolean, default True
+ If True returns a Timedelta/TimedeltaIndex of the results
+ if False returns a np.timedelta64 or ndarray of values of dtype timedelta64[ns]
Returns
-------
@@ -34,8 +32,8 @@ def to_timedelta(arg, box=True, unit='ns'):
def _convert_listlike(arg, box, unit):
- if isinstance(arg, (list,tuple)):
- arg = np.array(arg, dtype='O')
+ if isinstance(arg, (list,tuple)) or ((hasattr(arg,'__iter__') and not hasattr(arg,'dtype'))):
+ arg = np.array(list(arg), dtype='O')
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
@@ -47,11 +45,16 @@ def _convert_listlike(arg, box, unit):
try:
value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit)
except:
- value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ])
+
+ # try to process strings fast; may need to fallback
+ try:
+ value = np.array([ _get_string_converter(r, unit=unit)() for r in arg ],dtype='m8[ns]')
+ except:
+ value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ])
if box:
- from pandas import Series
- value = Series(value,dtype='m8[ns]')
+ from pandas import TimedeltaIndex
+ value = TimedeltaIndex(value,unit='ns')
return value
if arg is None:
@@ -64,7 +67,7 @@ def _convert_listlike(arg, box, unit):
return _convert_listlike(arg, box=box, unit=unit)
# ...so it must be a scalar value. Return scalar.
- return _coerce_scalar_to_timedelta_type(arg, unit=unit)
+ return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box)
_unit_map = {
'Y' : 'Y',
@@ -92,24 +95,48 @@ def _convert_listlike(arg, box, unit):
'NS' : 'ns',
'ns' : 'ns',
}
+_unit_scale = {
+ 'd' : 86400*1e9,
+ 'h' : 3600*1e9,
+ 'm' : 60*1e9,
+ 's' : 1e9,
+ 'ms' : 1e6,
+ 'us' : 1e3,
+ 'ns' : 1,
+ }
def _validate_timedelta_unit(arg):
""" provide validation / translation for timedelta short units """
try:
return _unit_map[arg]
except:
+ if arg is None:
+ return 'ns'
raise ValueError("invalid timedelta unit {0} provided".format(arg))
_short_search = re.compile(
"^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE)
_full_search = re.compile(
- "^\s*(?P<neg>-?)\s*(?P<days>\d+)?\s*(days|d|day)?,?\s*(?P<time>\d{2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE)
+ "^\s*(?P<neg>-?)\s*(?P<days>\d*\.?\d*)?\s*(days|d|day)?,?\s*\+?(?P<time>\d{2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE)
_nat_search = re.compile(
"^\s*(nat|nan)\s*$",re.IGNORECASE)
_whitespace = re.compile('^\s*$')
+_number_split = re.compile("^(\d+\.?\d*)")
+
+# construct the full2_search
+abbrevs = [('d' ,'days|d|day'),
+ ('h' ,'hours|h|hour'),
+ ('m' ,'minutes|min|minute|m'),
+ ('s' ,'seconds|sec|second|s'),
+ ('ms','milliseconds|milli|millis|millisecond|ms'),
+ ('us','microseconds|micro|micros|microsecond|us'),
+ ('ns','nanoseconds|nano|nanos|nanosecond|ns')]
-def _coerce_scalar_to_timedelta_type(r, unit='ns'):
- """ convert strings to timedelta; coerce to np.timedelta64"""
+_full_search2 = re.compile(''.join(
+ ["^\s*(?P<neg>-?)\s*"] + [ "(?P<" + p + ">\\d+\.?\d*\s*(" + ss + "))?\\s*" for p, ss in abbrevs ] + ['$']))
+
+def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True):
+ """ convert strings to timedelta; coerce to Timedelta (if box), else np.timedelta64"""
if isinstance(r, compat.string_types):
@@ -118,12 +145,21 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns'):
r = converter()
unit='ns'
- return tslib.convert_to_timedelta(r,unit)
+ result = tslib.convert_to_timedelta(r,unit)
+ if box:
+ result = tslib.Timedelta(result)
+
+ return result
def _get_string_converter(r, unit='ns'):
""" return a string converter for r to process the timedelta format """
# treat as a nan
+ if isnull(r):
+ def convert(r=None, unit=None):
+ return tslib.iNaT
+ return convert
+
if _whitespace.search(r):
def convert(r=None, unit=None):
return tslib.iNaT
@@ -141,9 +177,10 @@ def convert(r=None, unit=unit, m=m):
u = gd.get('unit')
if u is not None:
unit = u.lower()
+ result = tslib.cast_from_unit(r, unit)
if gd['neg']:
- r *= -1
- return tslib.cast_from_unit(r, unit)
+ result *= -1
+ return result
return convert
m = _full_search.search(r)
@@ -154,21 +191,66 @@ def convert(r=None, unit=None, m=m):
gd = m.groupdict()
- # convert to seconds
- value = float(gd['days'] or 0) * 86400
-
+ # handle time
+ value = 0
time = gd['time']
if time:
(hh,mm,ss) = time.split(':')
- value += float(hh)*3600 + float(mm)*60 + float(ss)
+ value += int((float(hh)*3600 + float(mm)*60 + float(ss))*1e9)
+ # handle frac
frac = gd['frac']
if frac:
- value += float(frac)
+ value += round(float(frac)*1e9)
+
+ # handle days (possibly negative)
+ is_neg = gd['neg']
+ if gd['days']:
+ days = int((float(gd['days'] or 0) * 86400)*1e9)
+ if gd['neg']:
+ days *= -1
+ value += days
+ else:
+ if gd['neg']:
+ value *= -1
+
+ return tslib.cast_from_unit(value, 'ns')
+ return convert
- if gd['neg']:
- value *= -1
- return tslib.cast_from_unit(value, 's')
+ # look for combo strings
+ m = _full_search2.search(r)
+ if m:
+ def convert(r=None, unit=None, m=m):
+ if r is not None:
+ m = _full_search2.search(r)
+
+ gd = m.groupdict()
+
+ # the parser
+ def parse(k, v):
+ if v is None:
+ return 0
+ v = float(_number_split.search(v).group())
+ return int(v*_unit_scale[k])
+
+ # handle non-days
+ days = gd.pop('days',None)
+ neg = gd.pop('neg',None)
+ value = 0
+ for k, v in gd.items():
+ value += parse(k,v)
+
+ # parse days / neg
+ if days:
+ days = parse('days',days)
+ if neg:
+ days *= -1
+ value += days
+ else:
+ if neg:
+ value *= -1
+
+ return tslib.cast_from_unit(value, 'ns')
return convert
m = _nat_search.search(r)
@@ -209,4 +291,3 @@ def _possibly_cast_to_timedelta(value, coerce=True, dtype=None):
value = np.array(new_value, dtype='timedelta64[ns]')
return value
-
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index c05d85a39441e..7cba1cf6ccffe 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -22,7 +22,7 @@ cdef extern from "Python.h":
# this is our datetime.pxd
from datetime cimport *
-from util cimport is_integer_object, is_datetime64_object, is_timedelta64_object
+from util cimport is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object
from libc.stdlib cimport free
@@ -37,10 +37,11 @@ from datetime import time as datetime_time
from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile,
tzutc as _dateutil_tzutc, gettz as _dateutil_gettz)
from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
-from pandas.compat import parse_date
-from pandas.compat import parse_date, string_types
+from pandas.compat import parse_date, string_types, PY3
from sys import version_info
+import operator
+import collections
# GH3363
cdef bint PY2 = version_info[0] == 2
@@ -155,6 +156,27 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, offset=None, box=False):
return result
+def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
+ # convert an i8 repr to an ndarray of timedelta or Timedelta (if box == True)
+
+ cdef:
+ Py_ssize_t i, n = len(arr)
+ int64_t value
+ ndarray[object] result = np.empty(n, dtype=object)
+
+ for i in range(n):
+
+ value = arr[i]
+ if value == iNaT:
+ result[i] = NaT
+ else:
+ if box:
+ result[i] = Timedelta(value)
+ else:
+ result[i] = timedelta(microseconds=int(value)/1000)
+
+ return result
+
cdef inline bint _is_tzlocal(object tz):
return isinstance(tz, _dateutil_tzlocal)
@@ -374,14 +396,14 @@ class Timestamp(_Timestamp):
that this flag is only applicable for ambiguous fall dst dates)
- 'NaT' will return NaT for an ambiguous time
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
-
+
Returns
-------
localized : Timestamp
"""
if ambiguous == 'infer':
raise ValueError('Cannot infer offset with only one time.')
-
+
if self.tzinfo is None:
# tz naive, localize
tz = maybe_get_tz(tz)
@@ -577,6 +599,7 @@ cpdef object get_value_box(ndarray arr, object loc):
cdef:
Py_ssize_t i, sz
void* data_ptr
+
if util.is_float_object(loc):
casted = int(loc)
if casted == loc:
@@ -592,6 +615,8 @@ cpdef object get_value_box(ndarray arr, object loc):
if arr.descr.type_num == NPY_DATETIME:
return Timestamp(util.get_value_1d(arr, i))
+ elif arr.descr.type_num == NPY_TIMEDELTA:
+ return Timedelta(util.get_value_1d(arr, i))
else:
return util.get_value_1d(arr, i)
@@ -800,6 +825,10 @@ cdef class _Timestamp(datetime):
result = Timestamp(normalize_date(result))
return result
+ # index/series like
+ elif hasattr(other, '_typ'):
+ return other + self
+
result = datetime.__add__(self, other)
if isinstance(result, datetime):
result = Timestamp(result)
@@ -811,6 +840,15 @@ cdef class _Timestamp(datetime):
or isinstance(other, timedelta) or hasattr(other, 'delta'):
neg_other = -other
return self + neg_other
+
+ # a Timestamp-DatetimeIndex -> yields a negative TimedeltaIndex
+ elif getattr(other,'_typ',None) == 'datetimeindex':
+ return -other.__sub__(self)
+
+ # a Timestamp-TimedeltaIndex -> yields a negative TimedeltaIndex
+ elif getattr(other,'_typ',None) == 'timedeltaindex':
+ return (-other).__add__(self)
+
elif other is NaT:
return NaT
return datetime.__sub__(self, other)
@@ -873,6 +911,7 @@ cdef class _NaT(_Timestamp):
return NaT
def __sub__(self, other):
+
if type(self) is datetime:
other, self = self, other
try:
@@ -1154,7 +1193,7 @@ cpdef inline object maybe_get_tz(object tz):
tz._filename = zone
else:
tz = pytz.timezone(tz)
- elif util.is_integer_object(tz):
+ elif is_integer_object(tz):
tz = pytz.FixedOffset(tz / 60)
return tz
@@ -1323,13 +1362,13 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
raise
# if we are coercing, dont' allow integers
- elif util.is_integer_object(val) and not coerce:
+ elif is_integer_object(val) and not coerce:
if val == iNaT:
iresult[i] = iNaT
else:
iresult[i] = val*m
seen_integer=1
- elif util.is_float_object(val) and not coerce:
+ elif is_float_object(val) and not coerce:
if val != val or val == iNaT:
iresult[i] = iNaT
else:
@@ -1429,6 +1468,510 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
return oresult
+# Similar to Timestamp/datetime, this is a construction requirement for timedeltas
+# we need to do object instantiation in python
+# This will serve as a C extension type that
+# shadows the python class, where we do any heavy lifting.
+
+cdef class _Timedelta(timedelta):
+
+ cdef readonly:
+ int64_t value # nanoseconds
+ object freq # frequency reference
+ bint is_populated # are my components populated
+ int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns
+
+ def __hash__(_Timedelta self):
+ return hash(self.value)
+
+ def __richcmp__(_Timedelta self, object other, int op):
+ cdef:
+ _Timedelta ots
+ int ndim
+
+ if isinstance(other, _Timedelta):
+ if isinstance(other, _NaT):
+ return _cmp_nat_dt(other, self, _reverse_ops[op])
+ ots = other
+ elif isinstance(other, timedelta):
+ ots = Timedelta(other)
+ else:
+ ndim = getattr(other, _NDIM_STRING, -1)
+
+ if ndim != -1:
+ if ndim == 0:
+ if isinstance(other, np.timedelta64):
+ other = Timedelta(other)
+ else:
+ if op == Py_EQ:
+ return False
+ elif op == Py_NE:
+ return True
+
+ # only allow ==, != ops
+ raise TypeError('Cannot compare type %r with type %r' %
+ (type(self).__name__,
+ type(other).__name__))
+ return PyObject_RichCompare(other, self, _reverse_ops[op])
+ else:
+ if op == Py_EQ:
+ return False
+ elif op == Py_NE:
+ return True
+ raise TypeError('Cannot compare type %r with type %r' %
+ (type(self).__name__, type(other).__name__))
+
+ return _cmp_scalar(self.value, ots.value, op)
+
+ def _ensure_components(_Timedelta self):
+ """
+ compute the components
+ """
+ cdef int64_t sfrac, ifrac, ivalue = self.value
+ cdef float64_t frac
+
+ if self.is_populated:
+ return
+
+ # put frac in seconds
+ frac = float(ivalue)/1e9
+ if frac < 0:
+ self._sign = -1
+
+ # even fraction
+ if int(-frac/86400) != -frac/86400.0:
+ self._d = int(-frac/86400.0+1)
+ frac += 86400*self._d
+ else:
+ frac = -frac
+ else:
+ self._sign = 1
+ self._d = 0
+
+ if frac >= 86400:
+ self._d += int(frac / 86400)
+ frac -= self._d * 86400
+
+ if frac >= 3600:
+ self._h = int(frac / 3600)
+ frac -= self._h * 3600
+ else:
+ self._h = 0
+
+ if frac >= 60:
+ self._m = int(frac / 60)
+ frac -= self._m * 60
+ else:
+ self._m = 0
+
+ if frac >= 0:
+ self._s = int(frac)
+ frac -= self._s
+ else:
+ self._s = 0
+
+ if frac != 0:
+
+ # reset so we don't lose precision
+ sfrac = int((self._h*3600 + self._m*60 + self._s)*1e9)
+ if self._sign < 0:
+ ifrac = ivalue + self._d*DAY_NS - sfrac
+ else:
+ ifrac = ivalue - (self._d*DAY_NS + sfrac)
+
+ self._ms = int(ifrac/1e6)
+ ifrac -= self._ms*1000*1000
+ self._us = int(ifrac/1e3)
+ ifrac -= self._us*1000
+ self._ns = ifrac
+ else:
+ self._ms = 0
+ self._us = 0
+ self._ns = 0
+
+ self.is_populated = 1
+
+ cpdef timedelta to_pytimedelta(_Timedelta self):
+ """
+ return an actual datetime.timedelta object
+ note: we lose nanosecond resolution if any
+ """
+ return timedelta(microseconds=int(self.value)/1000)
+
+# components named tuple
+Components = collections.namedtuple('Components',['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds'])
+
+# Python front end to C extension type _Timedelta
+# This serves as the box for timedelta64
+class Timedelta(_Timedelta):
+ """
+ Represents a duration, the difference between two dates or times.
+
+ Timedelta is the pandas equivalent of python's ``datetime.timedelta``
+ and is interchangable with it in most cases.
+
+ Parameters
+ ----------
+ value : Timedelta, timedelta, np.timedelta64, string, or integer
+ unit : string, [D,h,m,s,ms,us,ns]
+ Denote the unit of the input, if input is an integer. Default 'ns'.
+ days, seconds, microseconds, milliseconds, minutes, hours, weeks : numeric, optional
+ Values for construction in compat with datetime.timedelta.
+
+ Notes
+ -----
+ The ``.value`` attribute is always in ns.
+
+ """
+
+ def __new__(cls, object value=None, unit=None, **kwargs):
+ cdef _Timedelta td_base
+
+ if value is None:
+ if not len(kwargs):
+ raise ValueError("cannot construct a TimeDelta without a value/unit or descriptive keywords (days,seconds....)")
+ try:
+ value = timedelta(**kwargs)
+ except (TypeError):
+ raise ValueError("cannot construct a TimeDelta from the passed arguments, allowed keywords are "
+ "[days, seconds, microseconds, milliseconds, minutes, hours, weeks]")
+
+ if isinstance(value, Timedelta):
+ value = value.value
+ elif util.is_string_object(value):
+ from pandas import to_timedelta
+ value = to_timedelta(value,unit=unit,box=False)
+ elif isinstance(value, timedelta):
+ value = convert_to_timedelta64(value,'ns',False)
+ elif isinstance(value, np.timedelta64):
+ if unit is not None:
+ value = value.astype('timedelta64[{0}]'.format(unit))
+ value = value.astype('timedelta64[ns]')
+ elif hasattr(value,'delta'):
+ value = np.timedelta64(_delta_to_nanoseconds(value.delta),'ns')
+ elif is_integer_object(value) or util.is_float_object(value):
+ # unit=None is de-facto 'ns'
+ value = convert_to_timedelta64(value,unit,False)
+ elif _checknull_with_nat(value):
+ return NaT
+ else:
+ raise ValueError("Value must be Timedelta, string, integer, float, timedelta or convertible")
+
+ if isinstance(value, np.timedelta64):
+ value = value.view('i8')
+
+ # nat
+ if value == NPY_NAT:
+ return NaT
+
+ # make timedelta happy
+ td_base = _Timedelta.__new__(cls, microseconds=int(value)/1000)
+ td_base.value = value
+ td_base.is_populated = 0
+ return td_base
+
+ @property
+ def delta(self):
+ """ return out delta in ns (for internal compat) """
+ return self.value
+
+ @property
+ def asm8(self):
+ """ return a numpy timedelta64 array view of myself """
+ return np.int64(self.value).view('m8[ns]')
+
+ @property
+ def resolution(self):
+ """ return a string representing the lowest resolution that we have """
+
+ self._ensure_components()
+ if self._ns:
+ return "ns"
+ elif self._us:
+ return "us"
+ elif self._ms:
+ return "ms"
+ elif self._s:
+ return "s"
+ elif self._m:
+ return "m"
+ elif self._h:
+ return "h"
+ elif self._d:
+ return "D"
+ raise ValueError("invalid resolution")
+
+ def round(self, reso):
+ """
+ return a new Timedelta rounded to this resolution
+
+ Parameters
+ ----------
+ reso : a string indicating the rouding resolution, accepting values
+ d,h,m,s,ms,us
+
+ """
+ cdef int64_t frac, value = np.abs(self.value)
+
+ self._ensure_components()
+ frac = int(self._ms*1e6 + self._us*1e3+ self._ns)
+ if reso == 'us':
+ value -= self._ns
+ elif reso == 'ms':
+ value -= self._us*1000 + self._ns
+ elif reso == 's':
+ value -= frac
+ elif reso == 'm':
+ value -= int(self._s*1e9) + frac
+ elif reso == 'h':
+ value -= int((60*self._m + self._s)*1e9) + frac
+ elif reso == 'd' or reso == 'D':
+ value -= int((3600*self._h + 60*self._m + self._s)*1e9) + frac
+ else:
+ raise ValueError("invalid resolution")
+
+ if self._sign < 0:
+ value *= -1
+ return Timedelta(value,unit='ns')
+
+ def _repr_base(self, format=None):
+ """
+
+ Parameters
+ ----------
+ format : None|all|even_day|sub_day|long
+
+ Returns
+ -------
+ converted : string of a Timedelta
+
+ """
+ cdef object sign_pretty, sign2_pretty, seconds_pretty, subs
+
+ self._ensure_components()
+
+ if self._sign < 0:
+ sign_pretty = "-"
+ sign2_pretty = " +"
+ else:
+ sign_pretty = ""
+ sign2_pretty = " "
+
+ # show everything
+ if format == 'all':
+ seconds_pretty = "%02d.%03d%03d%03d" % (self._s, self._ms, self._us, self._ns)
+ return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d, sign2_pretty, self._h, self._m, seconds_pretty)
+
+ # by default not showing nano
+ if self._ms or self._us or self._ns:
+ seconds_pretty = "%02d.%03d%03d" % (self._s, self._ms, self._us)
+ else:
+ seconds_pretty = "%02d" % self._s
+
+ # if we have a partial day
+ subs = self._h or self._m or self._s or self._ms or self._us or self._ns
+
+ if format == 'even_day':
+ if not subs:
+ return "%s%d days" % (sign_pretty, self._d)
+
+ elif format == 'sub_day':
+ if not self._d:
+
+ # degenerate, don't need the extra space
+ if self._sign > 0:
+ sign2_pretty = ""
+ return "%s%s%02d:%02d:%s" % (sign_pretty, sign2_pretty, self._h, self._m, seconds_pretty)
+
+ if subs or format=='long':
+ return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d, sign2_pretty, self._h, self._m, seconds_pretty)
+ return "%s%d days" % (sign_pretty, self._d)
+
+
+ def __repr__(self):
+ return "Timedelta('{0}')".format(self._repr_base(format='long'))
+ def __str__(self):
+ return self._repr_base(format='long')
+
+ @property
+ def components(self):
+ """ Return a Components NamedTuple-like """
+ self._ensure_components()
+ if self._sign < 0:
+ return Components(-self._d,self._h,self._m,self._s,self._ms,self._us,self._ns)
+
+ # return the named tuple
+ return Components(self._d,self._h,self._m,self._s,self._ms,self._us,self._ns)
+
+ @property
+ def days(self):
+ """ The days for the Timedelta """
+ self._ensure_components()
+ if self._sign < 0:
+ return -1*self._d
+ return self._d
+
+ @property
+ def hours(self):
+ """ The hours for the Timedelta """
+ self._ensure_components()
+ return self._h
+
+ @property
+ def minutes(self):
+ """ The minutes for the Timedelta """
+ self._ensure_components()
+ return self._m
+
+ @property
+ def seconds(self):
+ """ The seconds for the Timedelta """
+ self._ensure_components()
+ return self._s
+
+ @property
+ def milliseconds(self):
+ """ The milliseconds for the Timedelta """
+ self._ensure_components()
+ return self._ms
+
+ @property
+ def microseconds(self):
+ """ The microseconds for the Timedelta """
+ self._ensure_components()
+ return self._us
+
+ @property
+ def nanoseconds(self):
+ """ The nanoseconds for the Timedelta """
+ self._ensure_components()
+ return self._ns
+
+ def __setstate__(self, state):
+ (value) = state
+ self.value = value
+
+ def __reduce__(self):
+ object_state = self.value,
+ return (Timedelta, object_state)
+
+ def view(self, dtype):
+ """ array view compat """
+ return np.timedelta64(self.value).view(dtype)
+
+ def _validate_ops_compat(self, other, op):
+ # return a boolean if we are compat with operating
+ if _checknull_with_nat(other):
+ return True
+ elif isinstance(other, (Timedelta, timedelta, np.timedelta64)):
+ return True
+ elif util.is_string_object(other):
+ return True
+ elif hasattr(other,'delta'):
+ return True
+ raise TypeError("cannot operate add a Timedelta with op {op} for {typ}".format(op=op,typ=type(other)))
+
+ def __add__(self, other):
+
+ # a Timedelta with Series/Index like
+ if hasattr(other,'_typ'):
+ return other + self
+
+ # an offset
+ elif hasattr(other,'delta') and not isinstance(other, Timedelta):
+ return self + other.delta
+
+ # a datetimelike
+ elif isinstance(other, (Timestamp, datetime, np.datetime64)):
+ return Timestamp(other) + self
+
+ self._validate_ops_compat(other,'__add__')
+
+ other = Timedelta(other)
+ if other is NaT:
+ return NaT
+ return Timedelta(self.value + other.value, unit='ns')
+
+ def __sub__(self, other):
+
+ # a Timedelta with Series/Index like
+ if hasattr(other,'_typ'):
+ neg_other = -other
+ return neg_other + self
+
+ # an offset
+ elif hasattr(other,'delta') and not isinstance(other, Timedelta):
+ return self - other.delta
+
+ self._validate_ops_compat(other,'__sub__')
+
+ other = Timedelta(other)
+ if other is NaT:
+ return NaT
+ return Timedelta(self.value - other.value, unit='ns')
+
+ def __mul__(self, other):
+
+ if other is NaT:
+ return NaT
+
+ # only integers allowed
+ if not is_integer_object(other):
+ raise TypeError("cannot multiply a Timedelta with {typ}".format(typ=type(other)))
+
+ return Timedelta(other*self.value, unit='ns')
+
+ __rmul__ = __mul__
+
+ def __truediv__(self, other):
+
+ # a timedelta64 IS an integer object as well
+ if is_timedelta64_object(other):
+ return self.value/float(_delta_to_nanoseconds(other))
+
+ # pure integers
+ elif is_integer_object(other):
+ return Timedelta(self.value/other, unit='ns')
+
+ self._validate_ops_compat(other,'__div__')
+
+ other = Timedelta(other)
+ if other is NaT:
+ return NaT
+
+ return self.value/float(other.value)
+
+ def _make_invalid(opstr):
+
+ def _invalid(other):
+ raise TypeError("cannot perform {opstr} with {typ}".format(opstr=opstr,typ=type(other)))
+
+ __rtruediv__ = _make_invalid('__rtruediv__')
+
+ if not PY3:
+ __div__ = __truediv__
+ __rdiv__ = _make_invalid('__rtruediv__')
+
+ __floordiv__ = _make_invalid('__floordiv__')
+ __rfloordiv__ = _make_invalid('__rfloordiv__')
+
+ def _op_unary_method(func, name):
+
+ def f(self):
+ return Timedelta(func(self.value), unit='ns')
+ f.__name__ = name
+ return f
+
+ __inv__ = _op_unary_method(lambda x: -x, '__inv__')
+ __neg__ = _op_unary_method(lambda x: -x, '__neg__')
+ __pos__ = _op_unary_method(lambda x: x, '__pos__')
+ __abs__ = _op_unary_method(lambda x: abs(x), '__abs__')
+
+cdef PyTypeObject* td_type = <PyTypeObject*> Timedelta
+
+cdef inline bint is_timedelta(object o):
+ return Py_TYPE(o) == td_type # isinstance(o, Timedelta)
+
def array_to_timedelta64(ndarray[object] values, unit='ns', coerce=False):
""" convert an ndarray to an array of ints that are timedeltas
force conversion if coerce = True,
@@ -1453,8 +1996,9 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
Convert an incoming object to a timedelta64 if possible
Handle these types of objects:
- - timedelta
+ - timedelta/Timedelta
- timedelta64
+ - an offset
- np.int64 (with unit providing a possible modifier)
- None/NaT
@@ -1467,6 +2011,9 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
"""
if _checknull_with_nat(ts):
return np.timedelta64(iNaT)
+ elif isinstance(ts, Timedelta):
+ # already in the proper format
+ ts = np.timedelta64(ts.value)
elif util.is_datetime64_object(ts):
# only accept a NaT here
if ts.astype('int64') == iNaT:
@@ -1484,11 +2031,21 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
else:
ts = cast_from_unit(ts, unit)
ts = np.timedelta64(ts)
+ elif is_float_object(ts):
+ if util.is_array(ts):
+ ts = ts.astype('int64').item()
+ if unit in ['Y','M','W']:
+ ts = np.timedelta64(int(ts), unit)
+ else:
+ ts = cast_from_unit(ts, unit)
+ ts = np.timedelta64(ts)
elif util.is_string_object(ts):
if ts in _nat_strings or coerce:
return np.timedelta64(iNaT)
else:
raise ValueError("Invalid type for timedelta scalar: %s" % type(ts))
+ elif hasattr(ts,'delta'):
+ ts = np.timedelta64(_delta_to_nanoseconds(ts),'ns')
if isinstance(ts, timedelta):
ts = np.timedelta64(ts)
@@ -1498,75 +2055,6 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
raise ValueError("Invalid type for timedelta scalar: %s" % type(ts))
return ts.astype('timedelta64[ns]')
-def repr_timedelta64(object value, format=None):
- """
- provide repr for timedelta64
-
- Parameters
- ----------
- value : timedelta64
- format : None|"short"|"long"
-
- Returns
- -------
- converted : Timestamp
-
- """
- cdef object ivalue
-
- ivalue = value.view('i8')
-
- # put frac in seconds
- frac = float(ivalue)/1e9
- sign = np.sign(frac)
- frac = np.abs(frac)
-
- if frac >= 86400:
- days = int(frac / 86400)
- frac -= days * 86400
- else:
- days = 0
-
- if frac >= 3600:
- hours = int(frac / 3600)
- frac -= hours * 3600
- else:
- hours = 0
-
- if frac >= 60:
- minutes = int(frac / 60)
- frac -= minutes * 60
- else:
- minutes = 0
-
- if frac >= 1:
- seconds = int(frac)
- frac -= seconds
- else:
- seconds = 0
-
- if frac == int(frac):
- seconds_pretty = "%02d" % seconds
- else:
- sp = abs(round(1e6*frac))
- seconds_pretty = "%02d.%06d" % (seconds, sp)
-
- if sign < 0:
- sign_pretty = "-"
- else:
- sign_pretty = ""
-
- if days or format == 'long':
- if (hours or minutes or seconds or frac) or format != 'short':
- return "%s%d days, %02d:%02d:%s" % (sign_pretty, days, hours, minutes,
- seconds_pretty)
- else:
- return "%s%d days" % (sign_pretty, days)
-
-
- return "%s%02d:%02d:%s" % (sign_pretty, hours, minutes, seconds_pretty)
-
-
def array_strptime(ndarray[object] values, object fmt, coerce=False):
cdef:
Py_ssize_t i, n = len(values)
@@ -1773,7 +2261,7 @@ def array_strptime(ndarray[object] values, object fmt, coerce=False):
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
- else: # Assume that if they bothered to include Julian day it will
+ else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal(
(julian - 1) + datetime_date(year, 1, 1).toordinal())
@@ -1822,9 +2310,10 @@ cdef inline _get_datetime64_nanos(object val):
else:
return ival
-cpdef inline int64_t cast_from_unit(object ts, object unit) except -1:
+cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
""" return a casting of the unit represented to nanoseconds
round the fractional part of a float to our precision, p """
+
if unit == 'D' or unit == 'd':
m = 1000000000L * 86400
p = 6
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index ef9d7d1566ec2..2b775201d9900 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -39,6 +39,7 @@
from pandas import bdate_range
from pandas.tseries.index import DatetimeIndex
+from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas import _testing
@@ -733,25 +734,23 @@ def getArangeMat():
def makeStringIndex(k=10):
return Index([rands(10) for _ in range(k)])
-
def makeUnicodeIndex(k=10):
return Index([randu(10) for _ in range(k)])
-
def makeIntIndex(k=10):
return Index(lrange(k))
-
def makeFloatIndex(k=10):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)))
-
def makeDateIndex(k=10, freq='B'):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq)
return DatetimeIndex(dr)
+def makeTimedeltaIndex(k=10, freq='D'):
+ return TimedeltaIndex(start='1 day',periods=k,freq=freq)
def makePeriodIndex(k=10):
dt = datetime(2000, 1, 1)
@@ -863,11 +862,12 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
- idx_type - "i"/"f"/"s"/"u"/"dt/"p".
+ idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
+ "td" create a datetime index.
if unspecified, string labels will be generated.
"""
@@ -878,7 +878,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
assert (names is None or names is False
or names is True or len(names) is nlevels)
assert idx_type is None or \
- (idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and nlevels == 1)
+ (idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
@@ -893,7 +893,8 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex,
- u=makeUnicodeIndex, dt=makeDateIndex, p=makePeriodIndex).get(idx_type)
+ u=makeUnicodeIndex, dt=makeDateIndex, td=makeTimedeltaIndex,
+ p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
@@ -902,7 +903,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
- '"i"/"f"/"s"/"u"/"dt/"p".' % idx_type)
+ '"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
@@ -959,11 +960,12 @@ def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
doesn't divide nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjuncion with a custom `data_gen_f`
- r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt".
+ r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
+ "td" create a timedelta index.
if unspecified, string labels will be generated.
@@ -996,9 +998,9 @@ def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
- (r_idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and r_idx_nlevels == 1)
+ (r_idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
- (c_idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and c_idx_nlevels == 1)
+ (c_idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
| closes #3009
closes #8187
closes #8209
closes #4533
closes #8190
closes #7869
closes #8214
This PR creates a new `timedelta` sub-class, the scalar `Timedelta`, very similar to how `Timestamp` sub-classes `datetime`. Further it creates a `TimedeltaIndex` modelled on `DatetimeIndex` that holds the underlying data (boxing is only on iteration).
- ToDo
- Document API changes:
- [x] `to_timedelta` return `TimedeltaIndex` rather than a `Series`
- [x] `to_timedelta` with scalar returns `Timedelta` (rather than `np.timedelta64`)
- [x] Boxing for `list(Series(...))` is now `Timedelta`
- [x] possible repr changes
- [x] `timedelta_range`
- [x] usage / examples
- [x] API
- Timedelta
- [x] conversions `to_timedelta/to_pytimedelta`
- TimedeltaIndex
- [x] conversions `to_pytimedelta`
- [x] what else needs boxing in Series?
- [x] fix formatters (Timedelta64Formatter; remove `repr_timedelta64` (no longer necessary)
- [x] create `timedelta_range` & fix freq / inferred_freq
- [x] fix remaining copied methods from DatetimeIndex
- more tests (of TimedeltaIndex / scalar)
- [x] scalar comparisons
- [x] index comparisons
- [x] getitem / partial string access
- [x] sql expecting `np.timedelta64` ? (currently skipping these 2 tests!!!!), see: https://github.com/jreback/pandas/commit/2743ef16f7dfe55ba63663fa65c5671031af94f1
- [x] validate series / td ops
- move methods of `TimedeltaIndex/DatetimeIndex`
- [x] move `DatetimeOpsMixin` to `tseries/base.py`?
- [x] `shift/take/order`
- [x] `slice_indexer`
- [x] `append/union/intersection`
- [x] comparbless?
- [x] `.dt` accessor are ok now
- [x] move `.td` to `.tdi`
| https://api.github.com/repos/pandas-dev/pandas/pulls/8184 | 2014-09-05T14:37:21Z | 2014-09-13T22:59:30Z | 2014-09-13T22:59:29Z | 2014-09-16T19:31:04Z |
BUG: scatter with errorbar raises IndexError | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 541c25c71e662..5a65d4305ec55 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -650,6 +650,7 @@ Bug Fixes
- Bug in ``pivot_table`` performed with nameless ``index`` and ``columns`` raises ``KeyError`` (:issue:`8103`)
+- Bug in ``DataFrame.plot(kind='scatter')`` draws points and errorbars with different colors when the color is specified by ``c`` keyword (:issue:`8081`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index a01574c4dd146..7694b1b087d10 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -349,7 +349,6 @@ def _check_has_errorbars(self, axes, xerr=0, yerr=0):
yerr : number
expected number of y errorbar
"""
-
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
@@ -2805,12 +2804,28 @@ def test_errorbar_scatter(self):
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
+
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y',
xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
+ def _check_errorbar_color(containers, expected, has_err='has_xerr'):
+ errs = [c.lines[1][0] for c in ax.containers if getattr(c, has_err, False)]
+ self._check_colors(errs, linecolors=[expected] * len(errs))
+
+ # GH 8081
+ df = DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
+ ax = df.plot(kind='scatter', x='a', y='b', xerr='d', yerr='e', c='red')
+ self._check_has_errorbars(ax, xerr=1, yerr=1)
+ _check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
+ _check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
+
+ ax = df.plot(kind='scatter', x='a', y='b', yerr='e', color='green')
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+ _check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
+
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 07200fcab3cd4..50f3ab23babad 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1394,15 +1394,13 @@ def _make_plot(self):
label = None
scatter = ax.scatter(data[x].values, data[y].values, label=label,
**self.kwds)
-
self._add_legend_handle(scatter, label)
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
- errors_y = self._get_errorbars(label=y, index=1, xerr=False)
+ errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
- if 'color' in self.kwds:
- err_kwds['color'] = self.kwds['color']
+ err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
| Closes #8081.
Also fixes a color problem with `c` kw found in #7780. (But should work with single color. It looks `errorbar` doesn't accept multiple colors).
| https://api.github.com/repos/pandas-dev/pandas/pulls/8183 | 2014-09-05T14:03:30Z | 2014-09-05T16:16:34Z | 2014-09-05T16:16:34Z | 2014-11-22T09:27:01Z |
SQL: add warning for mismatch in provided and written table name (GH7815) | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b72c41e45c9ca..48658132d7518 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -886,6 +886,12 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label, schema=schema)
table.insert(chunksize)
+ # check for potentially case sensitivity issues (GH7815)
+ if name not in self.engine.table_names(schema=schema or self.meta.schema):
+ warnings.warn("The provided table name '{0}' is not found exactly "
+ "as such in the database after writing the table, "
+ "possibly due to case sensitivity issues. Consider "
+ "using lower case table names.".format(name), UserWarning)
@property
def tables(self):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 93c95169a60d1..0108335c94249 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -681,6 +681,19 @@ def test_not_reflect_all_tables(self):
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
+ def test_warning_case_insensitive_table_name(self):
+ # see GH7815.
+ # We can't test that this warning is triggered, a the database
+ # configuration would have to be altered. But here we test that
+ # the warning is certainly NOT triggered in a normal case.
+ with warnings.catch_warnings(record=True) as w:
+ # Cause all warnings to always be triggered.
+ warnings.simplefilter("always")
+ # This should not trigger a Warning
+ self.test_frame1.to_sql('CaseSensitive', self.conn)
+ # Verify some things
+ self.assertEqual(len(w), 0, "Warning triggered for writing a table")
+
class TestSQLLegacyApi(_TestSQLApi):
"""
| Closes #7815
Adds a warning if the provided table name in `to_sql` and the actually used table name in the database do not match (eg converted to lower case due to case insensitivity settings of database, eg MySQL on Windows). When user is not aware of this, and keeps using the upper case table name, this leads to bugs in subsequent `to_sql` calls.
@maxgrenderjones
| https://api.github.com/repos/pandas-dev/pandas/pulls/8180 | 2014-09-05T09:30:58Z | 2014-09-09T07:55:57Z | 2014-09-09T07:55:57Z | 2014-09-09T07:55:59Z |
DOC: added release note, this fixes #8173 | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 5ca9073c0b32f..541c25c71e662 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -441,8 +441,14 @@ Enhancements
- ``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue:`7070`)
- ``pandas.tseries.holiday.Holiday`` now supports a list of offsets in Python3 (:issue:`7070`)
- ``pandas.tseries.holiday.Holiday`` now supports a days_of_week parameter (:issue:`7070`)
+- ``GroupBy.nth()`` now supports selecting multiple nth values (:issue:`7910`)
+ .. ipython:: python
+ business_dates = date_range(start='4/1/2014', end='6/30/2014', freq='B')
+ df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+ # get the first, 4th, and last date index for each month
+ df.groupby((df.index.year, df.index.month)).nth([0, 3, -1])
- ``Period`` and ``PeriodIndex`` supports addition/subtraction with ``timedelta``-likes (:issue:`7966`)
| Closes #8173, release note for #7910
| https://api.github.com/repos/pandas-dev/pandas/pulls/8179 | 2014-09-05T04:18:18Z | 2014-09-05T06:29:08Z | 2014-09-05T06:29:08Z | 2015-04-29T15:34:45Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.