title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST/REF: collect tests from test_api into method-specific files | diff --git a/pandas/tests/frame/methods/test_copy.py b/pandas/tests/frame/methods/test_copy.py
new file mode 100644
index 0000000000000..be52cf55fccb2
--- /dev/null
+++ b/pandas/tests/frame/methods/test_copy.py
@@ -0,0 +1,43 @@
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestCopy:
+ @pytest.mark.parametrize("attr", ["index", "columns"])
+ def test_copy_index_name_checking(self, float_frame, attr):
+ # don't want to be able to modify the index stored elsewhere after
+ # making a copy
+ ind = getattr(float_frame, attr)
+ ind.name = None
+ cp = float_frame.copy()
+ getattr(cp, attr).name = "foo"
+ assert getattr(float_frame, attr).name is None
+
+ def test_copy_cache(self):
+ # GH#31784 _item_cache not cleared on copy causes incorrect reads after updates
+ df = DataFrame({"a": [1]})
+
+ df["x"] = [0]
+ df["a"]
+
+ df.copy()
+
+ df["a"].values[0] = -1
+
+ tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]}))
+
+ df["y"] = [0]
+
+ assert df["a"].values[0] == -1
+ tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]}))
+
+ def test_copy(self, float_frame, float_string_frame):
+ cop = float_frame.copy()
+ cop["E"] = cop["A"]
+ assert "E" not in float_frame
+
+ # copy objects
+ copy = float_string_frame.copy()
+ assert copy._mgr is not float_string_frame._mgr
diff --git a/pandas/tests/frame/methods/test_to_numpy.py b/pandas/tests/frame/methods/test_to_numpy.py
new file mode 100644
index 0000000000000..3d69c004db6bb
--- /dev/null
+++ b/pandas/tests/frame/methods/test_to_numpy.py
@@ -0,0 +1,32 @@
+import numpy as np
+
+from pandas import DataFrame, Timestamp
+import pandas._testing as tm
+
+
+class TestToNumpy:
+ def test_to_numpy(self):
+ df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
+ expected = np.array([[1, 3], [2, 4.5]])
+ result = df.to_numpy()
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_to_numpy_dtype(self):
+ df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
+ expected = np.array([[1, 3], [2, 4]], dtype="int64")
+ result = df.to_numpy(dtype="int64")
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_to_numpy_copy(self):
+ arr = np.random.randn(4, 3)
+ df = DataFrame(arr)
+ assert df.values.base is arr
+ assert df.to_numpy(copy=False).base is arr
+ assert df.to_numpy(copy=True).base is not arr
+
+ def test_to_numpy_mixed_dtype_to_str(self):
+ # https://github.com/pandas-dev/pandas/issues/35455
+ df = DataFrame([[Timestamp("2020-01-01 00:00:00"), 100.0]])
+ result = df.to_numpy(dtype=str)
+ expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 34d56672e5536..a8bc6132159bd 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -16,16 +16,6 @@
class TestDataFrameMisc:
- @pytest.mark.parametrize("attr", ["index", "columns"])
- def test_copy_index_name_checking(self, float_frame, attr):
- # don't want to be able to modify the index stored elsewhere after
- # making a copy
- ind = getattr(float_frame, attr)
- ind.name = None
- cp = float_frame.copy()
- getattr(cp, attr).name = "foo"
- assert getattr(float_frame, attr).name is None
-
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
@@ -86,8 +76,7 @@ def test_get_axis(self, float_frame):
f._get_axis_number(None)
def test_keys(self, float_frame):
- getkeys = float_frame.keys
- assert getkeys() is float_frame.columns
+ assert float_frame.keys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
@@ -136,15 +125,6 @@ def test_new_empty_index(self):
df1.index.name = "foo"
assert df2.index.name is None
- def test_array_interface(self, float_frame):
- with np.errstate(all="ignore"):
- result = np.sqrt(float_frame)
- assert isinstance(result, type(float_frame))
- assert result.index is float_frame.index
- assert result.columns is float_frame.columns
-
- tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
-
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
@@ -156,7 +136,7 @@ def test_get_agg_axis(self, float_frame):
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
- def test_nonzero(self, float_frame, float_string_frame):
+ def test_empty(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
@@ -313,32 +293,6 @@ def test_len(self, float_frame):
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
- def test_to_numpy(self):
- df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
- expected = np.array([[1, 3], [2, 4.5]])
- result = df.to_numpy()
- tm.assert_numpy_array_equal(result, expected)
-
- def test_to_numpy_dtype(self):
- df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
- expected = np.array([[1, 3], [2, 4]], dtype="int64")
- result = df.to_numpy(dtype="int64")
- tm.assert_numpy_array_equal(result, expected)
-
- def test_to_numpy_copy(self):
- arr = np.random.randn(4, 3)
- df = DataFrame(arr)
- assert df.values.base is arr
- assert df.to_numpy(copy=False).base is arr
- assert df.to_numpy(copy=True).base is not arr
-
- def test_to_numpy_mixed_dtype_to_str(self):
- # https://github.com/pandas-dev/pandas/issues/35455
- df = DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
- result = df.to_numpy(dtype=str)
- expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
- tm.assert_numpy_array_equal(result, expected)
-
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
@@ -537,24 +491,6 @@ def test_set_flags(self, allows_duplicate_labels):
result.iloc[0, 0] = 10
assert df.iloc[0, 0] == 0
- def test_cache_on_copy(self):
- # GH 31784 _item_cache not cleared on copy causes incorrect reads after updates
- df = DataFrame({"a": [1]})
-
- df["x"] = [0]
- df["a"]
-
- df.copy()
-
- df["a"].values[0] = -1
-
- tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]}))
-
- df["y"] = [0]
-
- assert df["a"].values[0] == -1
- tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]}))
-
@skip_if_no("jinja2")
def test_constructor_expanddim_lookup(self):
# GH#33628 accessing _constructor_expanddim should not
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index e3aff0df3bab3..34aa11eb76306 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -254,15 +254,6 @@ def f(dtype):
if not compat.is_platform_windows():
f("M8[ns]")
- def test_copy(self, float_frame, float_string_frame):
- cop = float_frame.copy()
- cop["E"] = cop["A"]
- assert "E" not in float_frame
-
- # copy objects
- copy = float_string_frame.copy()
- assert copy._mgr is not float_string_frame._mgr
-
def test_pickle(self, float_string_frame, timezone_frame):
empty_frame = DataFrame()
diff --git a/pandas/tests/frame/test_npfuncs.py b/pandas/tests/frame/test_npfuncs.py
index a3b4c659a4124..1e37822798244 100644
--- a/pandas/tests/frame/test_npfuncs.py
+++ b/pandas/tests/frame/test_npfuncs.py
@@ -14,3 +14,12 @@ def test_asarray_homogenous(self):
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype="object")
tm.assert_numpy_array_equal(result, expected)
+
+ def test_np_sqrt(self, float_frame):
+ with np.errstate(all="ignore"):
+ result = np.sqrt(float_frame)
+ assert isinstance(result, type(float_frame))
+ assert result.index is float_frame.index
+ assert result.columns is float_frame.columns
+
+ tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_reductions.py
similarity index 100%
rename from pandas/tests/frame/test_analytics.py
rename to pandas/tests/frame/test_reductions.py
diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py
index 3d814f22ce262..1271a490d6b70 100644
--- a/pandas/tests/frame/test_timezones.py
+++ b/pandas/tests/frame/test_timezones.py
@@ -58,18 +58,3 @@ def test_boolean_compare_transpose_tzindex_with_dst(self, tz):
result = df.T == df.T
expected = DataFrame(True, index=list("ab"), columns=idx)
tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("copy", [True, False])
- @pytest.mark.parametrize(
- "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]]
- )
- def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz):
- # GH 6326
- result = DataFrame(
- np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
- )
- getattr(result, method)("UTC", copy=copy)
- expected = DataFrame(
- np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
- )
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py
index 4c2bf4683d17d..069557cc65455 100644
--- a/pandas/tests/series/methods/test_append.py
+++ b/pandas/tests/series/methods/test_append.py
@@ -7,6 +7,10 @@
class TestSeriesAppend:
+ def test_append_preserve_name(self, datetime_series):
+ result = datetime_series[:5].append(datetime_series[5:])
+ assert result.name == datetime_series.name
+
def test_append(self, datetime_series, string_series, object_series):
appended_series = string_series.append(object_series)
for idx, value in appended_series.items():
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index c948af41c5e8f..ac8a7b05fd30a 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -26,10 +26,6 @@
class TestSeriesMisc:
- def test_append_preserve_name(self, datetime_series):
- result = datetime_series[:5].append(datetime_series[5:])
- assert result.name == datetime_series.name
-
def test_getitem_preserve_name(self, datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
@@ -163,10 +159,7 @@ def test_iter_strings(self, string_series):
assert val == string_series[i]
def test_keys(self, datetime_series):
- # HACK: By doing this in two stages, we avoid 2to3 wrapping the call
- # to .keys() in a list()
- getkeys = datetime_series.keys
- assert getkeys() is datetime_series.index
+ assert datetime_series.keys() is datetime_series.index
def test_values(self, datetime_series):
tm.assert_almost_equal(
@@ -213,10 +206,6 @@ def test_class_axis(self):
# no exception and no empty docstring
assert pydoc.getdoc(Series.index)
- def test_numpy_unique(self, datetime_series):
- # it works!
- np.unique(datetime_series)
-
def test_item(self):
s = Series([1])
result = s.item()
diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py
index 89181a08819b1..672be981fd7d3 100644
--- a/pandas/tests/series/test_duplicates.py
+++ b/pandas/tests/series/test_duplicates.py
@@ -21,6 +21,11 @@ def test_nunique():
assert s.nunique() == 0
+def test_numpy_unique(datetime_series):
+ # it works!
+ np.unique(datetime_series)
+
+
def test_unique():
# GH714 also, dtype=float
s = Series([1.2345] * 100)
| I was surprised we didnt already have a test_copy
Also an easter egg:
```
def test_keys(self, datetime_series):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = datetime_series.keys
assert getkeys() is datetime_series.index
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/37525 | 2020-10-30T20:31:52Z | 2020-10-31T15:28:18Z | 2020-10-31T15:28:18Z | 2020-10-31T16:03:15Z |
BUG: slice_canonize incorrectly raising | diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 4f27fde52414a..006fd34632d5a 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -207,7 +207,7 @@ cdef slice slice_canonize(slice s):
Convert slice to canonical bounded form.
"""
cdef:
- Py_ssize_t start = 0, stop = 0, step = 1, length
+ Py_ssize_t start = 0, stop = 0, step = 1
if s.step is None:
step = 1
@@ -239,7 +239,7 @@ cdef slice slice_canonize(slice s):
if stop > start:
stop = start
- if start < 0 or (stop < 0 and s.stop is not None):
+ if start < 0 or (stop < 0 and s.stop is not None and step > 0):
raise ValueError("unbounded slice")
if stop < 0:
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 689cdbce103e6..8ee84803339df 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -922,6 +922,13 @@ def test_zero_step_raises(self, slc):
with pytest.raises(ValueError, match=msg):
BlockPlacement(slc)
+ def test_slice_canonize_negative_stop(self):
+ # GH#37524 negative stop is OK with negative step and positive start
+ slc = slice(3, -1, -2)
+
+ bp = BlockPlacement(slc)
+ assert bp.indexer == slice(3, None, -2)
+
@pytest.mark.parametrize(
"slc",
[
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Doesn't appear to affect anything in master, but stumbled on it when implementing the PR do always return views when indexing on columns. | https://api.github.com/repos/pandas-dev/pandas/pulls/37524 | 2020-10-30T20:21:29Z | 2020-10-31T19:31:24Z | 2020-10-31T19:31:24Z | 2020-10-31T19:32:58Z |
TST/REF: collect Series accessor tests | diff --git a/pandas/tests/series/accessors/__init__.py b/pandas/tests/series/accessors/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py
new file mode 100644
index 0000000000000..f561ac82a8901
--- /dev/null
+++ b/pandas/tests/series/accessors/test_cat_accessor.py
@@ -0,0 +1,242 @@
+import warnings
+
+import numpy as np
+import pytest
+
+from pandas import (
+ Categorical,
+ DataFrame,
+ DatetimeIndex,
+ Index,
+ Series,
+ TimedeltaIndex,
+ Timestamp,
+ date_range,
+ period_range,
+ timedelta_range,
+)
+import pandas._testing as tm
+from pandas.core.arrays import PeriodArray
+from pandas.core.arrays.categorical import CategoricalAccessor
+from pandas.core.indexes.accessors import Properties
+
+
+class TestCatAccessor:
+ @pytest.mark.parametrize(
+ "method",
+ [
+ lambda x: x.cat.set_categories([1, 2, 3]),
+ lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
+ lambda x: x.cat.rename_categories([1, 2, 3]),
+ lambda x: x.cat.remove_unused_categories(),
+ lambda x: x.cat.remove_categories([2]),
+ lambda x: x.cat.add_categories([4]),
+ lambda x: x.cat.as_ordered(),
+ lambda x: x.cat.as_unordered(),
+ ],
+ )
+ def test_getname_categorical_accessor(self, method):
+ # GH#17509
+ ser = Series([1, 2, 3], name="A").astype("category")
+ expected = "A"
+ result = method(ser).name
+ assert result == expected
+
+ def test_cat_accessor(self):
+ ser = Series(Categorical(["a", "b", np.nan, "a"]))
+ tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))
+ assert not ser.cat.ordered, False
+
+ exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
+ return_value = ser.cat.set_categories(["b", "a"], inplace=True)
+ assert return_value is None
+ tm.assert_categorical_equal(ser.values, exp)
+
+ res = ser.cat.set_categories(["b", "a"])
+ tm.assert_categorical_equal(res.values, exp)
+
+ ser[:] = "a"
+ ser = ser.cat.remove_unused_categories()
+ tm.assert_index_equal(ser.cat.categories, Index(["a"]))
+
+ def test_cat_accessor_api(self):
+ # GH#9322
+
+ assert Series.cat is CategoricalAccessor
+ ser = Series(list("aabbcde")).astype("category")
+ assert isinstance(ser.cat, CategoricalAccessor)
+
+ invalid = Series([1])
+ with pytest.raises(AttributeError, match="only use .cat accessor"):
+ invalid.cat
+ assert not hasattr(invalid, "cat")
+
+ def test_cat_accessor_no_new_attributes(self):
+ # https://github.com/pandas-dev/pandas/issues/10673
+ cat = Series(list("aabbcde")).astype("category")
+ with pytest.raises(AttributeError, match="You cannot add any new attribute"):
+ cat.cat.xlabel = "a"
+
+ def test_cat_accessor_updates_on_inplace(self):
+ ser = Series(list("abc")).astype("category")
+ return_value = ser.drop(0, inplace=True)
+ assert return_value is None
+ return_value = ser.cat.remove_unused_categories(inplace=True)
+ assert return_value is None
+ assert len(ser.cat.categories) == 2
+
+ def test_categorical_delegations(self):
+
+ # invalid accessor
+ msg = r"Can only use \.cat accessor with a 'category' dtype"
+ with pytest.raises(AttributeError, match=msg):
+ Series([1, 2, 3]).cat
+ with pytest.raises(AttributeError, match=msg):
+ Series([1, 2, 3]).cat()
+ with pytest.raises(AttributeError, match=msg):
+ Series(["a", "b", "c"]).cat
+ with pytest.raises(AttributeError, match=msg):
+ Series(np.arange(5.0)).cat
+ with pytest.raises(AttributeError, match=msg):
+ Series([Timestamp("20130101")]).cat
+
+ # Series should delegate calls to '.categories', '.codes', '.ordered'
+ # and the methods '.set_categories()' 'drop_unused_categories()' to the
+ # categorical
+ ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
+ exp_categories = Index(["a", "b", "c"])
+ tm.assert_index_equal(ser.cat.categories, exp_categories)
+ ser.cat.categories = [1, 2, 3]
+ exp_categories = Index([1, 2, 3])
+ tm.assert_index_equal(ser.cat.categories, exp_categories)
+
+ exp_codes = Series([0, 1, 2, 0], dtype="int8")
+ tm.assert_series_equal(ser.cat.codes, exp_codes)
+
+ assert ser.cat.ordered
+ ser = ser.cat.as_unordered()
+ assert not ser.cat.ordered
+ return_value = ser.cat.as_ordered(inplace=True)
+ assert return_value is None
+ assert ser.cat.ordered
+
+ # reorder
+ ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
+ exp_categories = Index(["c", "b", "a"])
+ exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
+ ser = ser.cat.set_categories(["c", "b", "a"])
+ tm.assert_index_equal(ser.cat.categories, exp_categories)
+ tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
+ tm.assert_numpy_array_equal(ser.__array__(), exp_values)
+
+ # remove unused categories
+ ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
+ exp_categories = Index(["a", "b"])
+ exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
+ ser = ser.cat.remove_unused_categories()
+ tm.assert_index_equal(ser.cat.categories, exp_categories)
+ tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
+ tm.assert_numpy_array_equal(ser.__array__(), exp_values)
+
+ # This method is likely to be confused, so test that it raises an error
+ # on wrong inputs:
+ msg = "'Series' object has no attribute 'set_categories'"
+ with pytest.raises(AttributeError, match=msg):
+ ser.set_categories([4, 3, 2, 1])
+
+ # right: ser.cat.set_categories([4,3,2,1])
+
+ # GH#18862 (let Series.cat.rename_categories take callables)
+ ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
+ result = ser.cat.rename_categories(lambda x: x.upper())
+ expected = Series(
+ Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
+ )
+ tm.assert_series_equal(result, expected)
+
+ def test_dt_accessor_api_for_categorical(self):
+ # https://github.com/pandas-dev/pandas/issues/10661
+
+ s_dr = Series(date_range("1/1/2015", periods=5, tz="MET"))
+ c_dr = s_dr.astype("category")
+
+ s_pr = Series(period_range("1/1/2015", freq="D", periods=5))
+ c_pr = s_pr.astype("category")
+
+ s_tdr = Series(timedelta_range("1 days", "10 days"))
+ c_tdr = s_tdr.astype("category")
+
+ # only testing field (like .day)
+ # and bool (is_month_start)
+ get_ops = lambda x: x._datetimelike_ops
+
+ test_data = [
+ ("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
+ ("Period", get_ops(PeriodArray), s_pr, c_pr),
+ ("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr),
+ ]
+
+ assert isinstance(c_dr.dt, Properties)
+
+ special_func_defs = [
+ ("strftime", ("%Y-%m-%d",), {}),
+ ("tz_convert", ("EST",), {}),
+ ("round", ("D",), {}),
+ ("floor", ("D",), {}),
+ ("ceil", ("D",), {}),
+ ("asfreq", ("D",), {}),
+ # FIXME: don't leave commented-out
+ # ('tz_localize', ("UTC",), {}),
+ ]
+ _special_func_names = [f[0] for f in special_func_defs]
+
+ # the series is already localized
+ _ignore_names = ["tz_localize", "components"]
+
+ for name, attr_names, s, c in test_data:
+ func_names = [
+ f
+ for f in dir(s.dt)
+ if not (
+ f.startswith("_")
+ or f in attr_names
+ or f in _special_func_names
+ or f in _ignore_names
+ )
+ ]
+
+ func_defs = [(f, (), {}) for f in func_names]
+ for f_def in special_func_defs:
+ if f_def[0] in dir(s.dt):
+ func_defs.append(f_def)
+
+ for func, args, kwargs in func_defs:
+ with warnings.catch_warnings():
+ if func == "to_period":
+ # dropping TZ
+ warnings.simplefilter("ignore", UserWarning)
+ res = getattr(c.dt, func)(*args, **kwargs)
+ exp = getattr(s.dt, func)(*args, **kwargs)
+
+ tm.assert_equal(res, exp)
+
+ for attr in attr_names:
+ if attr in ["week", "weekofyear"]:
+ # GH#33595 Deprecate week and weekofyear
+ continue
+ res = getattr(c.dt, attr)
+ exp = getattr(s.dt, attr)
+
+ if isinstance(res, DataFrame):
+ tm.assert_frame_equal(res, exp)
+ elif isinstance(res, Series):
+ tm.assert_series_equal(res, exp)
+ else:
+ tm.assert_almost_equal(res, exp)
+
+ invalid = Series([1, 2, 3]).astype("category")
+ msg = "Can only use .dt accessor with datetimelike"
+
+ with pytest.raises(AttributeError, match=msg):
+ invalid.dt
+ assert not hasattr(invalid, "str")
diff --git a/pandas/tests/series/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
similarity index 100%
rename from pandas/tests/series/test_dt_accessor.py
rename to pandas/tests/series/accessors/test_dt_accessor.py
diff --git a/pandas/tests/series/accessors/test_sparse_accessor.py b/pandas/tests/series/accessors/test_sparse_accessor.py
new file mode 100644
index 0000000000000..118095b5dcdbc
--- /dev/null
+++ b/pandas/tests/series/accessors/test_sparse_accessor.py
@@ -0,0 +1,9 @@
+from pandas import Series
+
+
+class TestSparseAccessor:
+ def test_sparse_accessor_updates_on_inplace(self):
+ ser = Series([1, 1, 2, 3], dtype="Sparse[int]")
+ return_value = ser.drop([0, 1], inplace=True)
+ assert return_value is None
+ assert ser.sparse.density == 1.0
diff --git a/pandas/tests/series/accessors/test_str_accessor.py b/pandas/tests/series/accessors/test_str_accessor.py
new file mode 100644
index 0000000000000..09d965ef1f322
--- /dev/null
+++ b/pandas/tests/series/accessors/test_str_accessor.py
@@ -0,0 +1,25 @@
+import pytest
+
+from pandas import Series
+import pandas._testing as tm
+
+
+class TestStrAccessor:
+ def test_str_attribute(self):
+ # GH#9068
+ methods = ["strip", "rstrip", "lstrip"]
+ ser = Series([" jack", "jill ", " jesse ", "frank"])
+ for method in methods:
+ expected = Series([getattr(str, method)(x) for x in ser.values])
+ tm.assert_series_equal(getattr(Series.str, method)(ser.str), expected)
+
+ # str accessor only valid with string values
+ ser = Series(range(5))
+ with pytest.raises(AttributeError, match="only use .str accessor"):
+ ser.str.repeat(2)
+
+ def test_str_accessor_updates_on_inplace(self):
+ ser = Series(list("abc"))
+ return_value = ser.drop([0], inplace=True)
+ assert return_value is None
+ assert len(ser.str.lower()) == 2
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index c948af41c5e8f..4461e69c72256 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -1,5 +1,4 @@
import pydoc
-import warnings
import numpy as np
import pytest
@@ -8,21 +7,8 @@
from pandas.util._test_decorators import async_mark
import pandas as pd
-from pandas import (
- Categorical,
- DataFrame,
- DatetimeIndex,
- Index,
- Series,
- Timedelta,
- TimedeltaIndex,
- Timestamp,
- date_range,
- period_range,
- timedelta_range,
-)
+from pandas import DataFrame, Index, Series, Timedelta, Timestamp, date_range
import pandas._testing as tm
-from pandas.core.arrays import PeriodArray
class TestSeriesMisc:
@@ -55,12 +41,6 @@ def _pickle_roundtrip(self, obj):
unpickled = pd.read_pickle(path)
return unpickled
- def test_sparse_accessor_updates_on_inplace(self):
- s = Series([1, 1, 2, 3], dtype="Sparse[int]")
- return_value = s.drop([0, 1], inplace=True)
- assert return_value is None
- assert s.sparse.density == 1.0
-
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
@@ -287,25 +267,6 @@ def f(x):
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F"))
- def test_str_accessor_updates_on_inplace(self):
- s = Series(list("abc"))
- return_value = s.drop([0], inplace=True)
- assert return_value is None
- assert len(s.str.lower()) == 2
-
- def test_str_attribute(self):
- # GH9068
- methods = ["strip", "rstrip", "lstrip"]
- s = Series([" jack", "jill ", " jesse ", "frank"])
- for method in methods:
- expected = Series([getattr(str, method)(x) for x in s.values])
- tm.assert_series_equal(getattr(Series.str, method)(s.str), expected)
-
- # str accessor only valid with string values
- s = Series(range(5))
- with pytest.raises(AttributeError, match="only use .str accessor"):
- s.str.repeat(2)
-
def test_empty_method(self):
s_empty = Series(dtype=object)
assert s_empty.empty
@@ -377,226 +338,3 @@ def test_set_flags(self, allows_duplicate_labels):
)
result.iloc[0] = 10
assert df.iloc[0] == 0
-
-
-class TestCategoricalSeries:
- @pytest.mark.parametrize(
- "method",
- [
- lambda x: x.cat.set_categories([1, 2, 3]),
- lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
- lambda x: x.cat.rename_categories([1, 2, 3]),
- lambda x: x.cat.remove_unused_categories(),
- lambda x: x.cat.remove_categories([2]),
- lambda x: x.cat.add_categories([4]),
- lambda x: x.cat.as_ordered(),
- lambda x: x.cat.as_unordered(),
- ],
- )
- def test_getname_categorical_accessor(self, method):
- # GH 17509
- s = Series([1, 2, 3], name="A").astype("category")
- expected = "A"
- result = method(s).name
- assert result == expected
-
- def test_cat_accessor(self):
- s = Series(Categorical(["a", "b", np.nan, "a"]))
- tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
- assert not s.cat.ordered, False
-
- exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
- return_value = s.cat.set_categories(["b", "a"], inplace=True)
- assert return_value is None
- tm.assert_categorical_equal(s.values, exp)
-
- res = s.cat.set_categories(["b", "a"])
- tm.assert_categorical_equal(res.values, exp)
-
- s[:] = "a"
- s = s.cat.remove_unused_categories()
- tm.assert_index_equal(s.cat.categories, Index(["a"]))
-
- def test_cat_accessor_api(self):
- # GH 9322
- from pandas.core.arrays.categorical import CategoricalAccessor
-
- assert Series.cat is CategoricalAccessor
- s = Series(list("aabbcde")).astype("category")
- assert isinstance(s.cat, CategoricalAccessor)
-
- invalid = Series([1])
- with pytest.raises(AttributeError, match="only use .cat accessor"):
- invalid.cat
- assert not hasattr(invalid, "cat")
-
- def test_cat_accessor_no_new_attributes(self):
- # https://github.com/pandas-dev/pandas/issues/10673
- c = Series(list("aabbcde")).astype("category")
- with pytest.raises(AttributeError, match="You cannot add any new attribute"):
- c.cat.xlabel = "a"
-
- def test_cat_accessor_updates_on_inplace(self):
- s = Series(list("abc")).astype("category")
- return_value = s.drop(0, inplace=True)
- assert return_value is None
- return_value = s.cat.remove_unused_categories(inplace=True)
- assert return_value is None
- assert len(s.cat.categories) == 2
-
- def test_categorical_delegations(self):
-
- # invalid accessor
- msg = r"Can only use \.cat accessor with a 'category' dtype"
- with pytest.raises(AttributeError, match=msg):
- Series([1, 2, 3]).cat
- with pytest.raises(AttributeError, match=msg):
- Series([1, 2, 3]).cat()
- with pytest.raises(AttributeError, match=msg):
- Series(["a", "b", "c"]).cat
- with pytest.raises(AttributeError, match=msg):
- Series(np.arange(5.0)).cat
- with pytest.raises(AttributeError, match=msg):
- Series([Timestamp("20130101")]).cat
-
- # Series should delegate calls to '.categories', '.codes', '.ordered'
- # and the methods '.set_categories()' 'drop_unused_categories()' to the
- # categorical
- s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
- exp_categories = Index(["a", "b", "c"])
- tm.assert_index_equal(s.cat.categories, exp_categories)
- s.cat.categories = [1, 2, 3]
- exp_categories = Index([1, 2, 3])
- tm.assert_index_equal(s.cat.categories, exp_categories)
-
- exp_codes = Series([0, 1, 2, 0], dtype="int8")
- tm.assert_series_equal(s.cat.codes, exp_codes)
-
- assert s.cat.ordered
- s = s.cat.as_unordered()
- assert not s.cat.ordered
- return_value = s.cat.as_ordered(inplace=True)
- assert return_value is None
- assert s.cat.ordered
-
- # reorder
- s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
- exp_categories = Index(["c", "b", "a"])
- exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
- s = s.cat.set_categories(["c", "b", "a"])
- tm.assert_index_equal(s.cat.categories, exp_categories)
- tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
- tm.assert_numpy_array_equal(s.__array__(), exp_values)
-
- # remove unused categories
- s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
- exp_categories = Index(["a", "b"])
- exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
- s = s.cat.remove_unused_categories()
- tm.assert_index_equal(s.cat.categories, exp_categories)
- tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
- tm.assert_numpy_array_equal(s.__array__(), exp_values)
-
- # This method is likely to be confused, so test that it raises an error
- # on wrong inputs:
- msg = "'Series' object has no attribute 'set_categories'"
- with pytest.raises(AttributeError, match=msg):
- s.set_categories([4, 3, 2, 1])
-
- # right: s.cat.set_categories([4,3,2,1])
-
- # GH18862 (let Series.cat.rename_categories take callables)
- s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
- result = s.cat.rename_categories(lambda x: x.upper())
- expected = Series(
- Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
- )
- tm.assert_series_equal(result, expected)
-
- def test_dt_accessor_api_for_categorical(self):
- # https://github.com/pandas-dev/pandas/issues/10661
- from pandas.core.indexes.accessors import Properties
-
- s_dr = Series(date_range("1/1/2015", periods=5, tz="MET"))
- c_dr = s_dr.astype("category")
-
- s_pr = Series(period_range("1/1/2015", freq="D", periods=5))
- c_pr = s_pr.astype("category")
-
- s_tdr = Series(timedelta_range("1 days", "10 days"))
- c_tdr = s_tdr.astype("category")
-
- # only testing field (like .day)
- # and bool (is_month_start)
- get_ops = lambda x: x._datetimelike_ops
-
- test_data = [
- ("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
- ("Period", get_ops(PeriodArray), s_pr, c_pr),
- ("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr),
- ]
-
- assert isinstance(c_dr.dt, Properties)
-
- special_func_defs = [
- ("strftime", ("%Y-%m-%d",), {}),
- ("tz_convert", ("EST",), {}),
- ("round", ("D",), {}),
- ("floor", ("D",), {}),
- ("ceil", ("D",), {}),
- ("asfreq", ("D",), {}),
- # FIXME: don't leave commented-out
- # ('tz_localize', ("UTC",), {}),
- ]
- _special_func_names = [f[0] for f in special_func_defs]
-
- # the series is already localized
- _ignore_names = ["tz_localize", "components"]
-
- for name, attr_names, s, c in test_data:
- func_names = [
- f
- for f in dir(s.dt)
- if not (
- f.startswith("_")
- or f in attr_names
- or f in _special_func_names
- or f in _ignore_names
- )
- ]
-
- func_defs = [(f, (), {}) for f in func_names]
- for f_def in special_func_defs:
- if f_def[0] in dir(s.dt):
- func_defs.append(f_def)
-
- for func, args, kwargs in func_defs:
- with warnings.catch_warnings():
- if func == "to_period":
- # dropping TZ
- warnings.simplefilter("ignore", UserWarning)
- res = getattr(c.dt, func)(*args, **kwargs)
- exp = getattr(s.dt, func)(*args, **kwargs)
-
- tm.assert_equal(res, exp)
-
- for attr in attr_names:
- if attr in ["week", "weekofyear"]:
- # GH#33595 Deprecate week and weekofyear
- continue
- res = getattr(c.dt, attr)
- exp = getattr(s.dt, attr)
-
- if isinstance(res, DataFrame):
- tm.assert_frame_equal(res, exp)
- elif isinstance(res, Series):
- tm.assert_series_equal(res, exp)
- else:
- tm.assert_almost_equal(res, exp)
-
- invalid = Series([1, 2, 3]).astype("category")
- msg = "Can only use .dt accessor with datetimelike"
-
- with pytest.raises(AttributeError, match=msg):
- invalid.dt
- assert not hasattr(invalid, "str")
| https://api.github.com/repos/pandas-dev/pandas/pulls/37523 | 2020-10-30T20:19:29Z | 2020-10-31T15:16:04Z | 2020-10-31T15:16:04Z | 2020-10-31T15:19:29Z | |
TST: suppress warnings we cant do anything about | diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index c474e67123ef7..e9eaa95ca2ca3 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -21,7 +21,6 @@
"xlrd",
marks=[
td.skip_if_no("xlrd"),
- pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
@@ -35,7 +34,6 @@
None,
marks=[
td.skip_if_no("xlrd"),
- pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")),
diff --git a/pandas/tests/io/pytables/__init__.py b/pandas/tests/io/pytables/__init__.py
index e69de29bb2d1d..fb4b317a5e977 100644
--- a/pandas/tests/io/pytables/__init__.py
+++ b/pandas/tests/io/pytables/__init__.py
@@ -0,0 +1,9 @@
+import pytest
+
+pytestmark = [
+ # pytables https://github.com/PyTables/PyTables/issues/822
+ pytest.mark.filterwarnings(
+ "ignore:a closed node found in the registry:UserWarning"
+ ),
+ pytest.mark.filterwarnings(r"ignore:tostring\(\) is deprecated:DeprecationWarning"),
+]
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index e3b779678c68b..209a4233fc3b7 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -27,6 +27,7 @@ def test_foo():
from distutils.version import LooseVersion
import locale
from typing import Callable, Optional
+import warnings
import numpy as np
import pytest
@@ -51,10 +52,20 @@ def safe_import(mod_name: str, min_version: Optional[str] = None):
object
The imported module if successful, or False
"""
- try:
- mod = __import__(mod_name)
- except ImportError:
- return False
+ with warnings.catch_warnings():
+ # Suppress warnings that we can't do anything about,
+ # e.g. from aiohttp
+ warnings.filterwarnings(
+ "ignore",
+ category=DeprecationWarning,
+ module="aiohttp",
+ message=".*decorator is deprecated since Python 3.8.*",
+ )
+
+ try:
+ mod = __import__(mod_name)
+ except ImportError:
+ return False
if not min_version:
return mod
| and remove redundant filterwarnings from excel.test_readers | https://api.github.com/repos/pandas-dev/pandas/pulls/37522 | 2020-10-30T20:01:48Z | 2020-10-30T21:41:34Z | 2020-10-30T21:41:34Z | 2020-10-30T22:08:34Z |
REF: _setitem_with_indexer_split_path | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index e376f930c8c63..3cb9cba01da48 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1625,94 +1625,94 @@ def _setitem_with_indexer(self, indexer, value):
self._setitem_with_indexer_missing(indexer, value)
return
- # set
- item_labels = self.obj._get_axis(info_axis)
-
# align and set the values
if take_split_path:
# We have to operate column-wise
+ self._setitem_with_indexer_split_path(indexer, value)
+ else:
+ self._setitem_single_block(indexer, value)
- # Above we only set take_split_path to True for 2D cases
- assert self.ndim == 2
- assert info_axis == 1
+ def _setitem_with_indexer_split_path(self, indexer, value):
+ """
+ Setitem column-wise.
+ """
+ # Above we only set take_split_path to True for 2D cases
+ assert self.ndim == 2
- if not isinstance(indexer, tuple):
- indexer = _tuplify(self.ndim, indexer)
+ if not isinstance(indexer, tuple):
+ indexer = _tuplify(self.ndim, indexer)
- if isinstance(value, ABCSeries):
- value = self._align_series(indexer, value)
+ if isinstance(value, ABCSeries):
+ value = self._align_series(indexer, value)
- info_idx = indexer[info_axis]
- if is_integer(info_idx):
- info_idx = [info_idx]
- labels = item_labels[info_idx]
+ info_idx = indexer[1]
+ if is_integer(info_idx):
+ info_idx = [info_idx]
+ labels = self.obj.columns[info_idx]
- # Ensure we have something we can iterate over
- ilocs = self._ensure_iterable_column_indexer(indexer[1])
+ # Ensure we have something we can iterate over
+ ilocs = self._ensure_iterable_column_indexer(indexer[1])
- plane_indexer = indexer[:1]
- lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
- # lplane_indexer gives the expected length of obj[indexer[0]]
+ plane_indexer = indexer[:1]
+ lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
+ # lplane_indexer gives the expected length of obj[indexer[0]]
- if len(labels) == 1:
- # We can operate on a single column
+ if len(labels) == 1:
+ # We can operate on a single column
- # require that we are setting the right number of values that
- # we are indexing
- if is_list_like_indexer(value) and 0 != lplane_indexer != len(value):
- # Exclude zero-len for e.g. boolean masking that is all-false
- raise ValueError(
- "cannot set using a multi-index "
- "selection indexer with a different "
- "length than the value"
- )
-
- # we need an iterable, with a ndim of at least 1
- # eg. don't pass through np.array(0)
- if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
-
- # we have an equal len Frame
- if isinstance(value, ABCDataFrame):
- self._setitem_with_indexer_frame_value(indexer, value)
-
- # we have an equal len ndarray/convertible to our labels
- # hasattr first, to avoid coercing to ndarray without reason.
- # But we may be relying on the ndarray coercion to check ndim.
- # Why not just convert to an ndarray earlier on if needed?
- elif np.ndim(value) == 2:
- self._setitem_with_indexer_2d_value(indexer, value)
-
- elif (
- len(labels) == 1
- and lplane_indexer == len(value)
- and not is_scalar(plane_indexer[0])
- ):
- # we have an equal len list/ndarray
- # We only get here with len(labels) == len(ilocs) == 1
- self._setitem_single_column(ilocs[0], value, plane_indexer)
+ # require that we are setting the right number of values that
+ # we are indexing
+ if is_list_like_indexer(value) and 0 != lplane_indexer != len(value):
+ # Exclude zero-len for e.g. boolean masking that is all-false
+ raise ValueError(
+ "cannot set using a multi-index "
+ "selection indexer with a different "
+ "length than the value"
+ )
- elif lplane_indexer == 0 and len(value) == len(self.obj.index):
- # We get here in one case via .loc with a all-False mask
- pass
+ # we need an iterable, with a ndim of at least 1
+ # eg. don't pass through np.array(0)
+ if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
+
+ # we have an equal len Frame
+ if isinstance(value, ABCDataFrame):
+ self._setitem_with_indexer_frame_value(indexer, value)
+
+ # we have an equal len ndarray/convertible to our labels
+ # hasattr first, to avoid coercing to ndarray without reason.
+ # But we may be relying on the ndarray coercion to check ndim.
+ # Why not just convert to an ndarray earlier on if needed?
+ elif np.ndim(value) == 2:
+ self._setitem_with_indexer_2d_value(indexer, value)
+
+ elif (
+ len(labels) == 1
+ and lplane_indexer == len(value)
+ and not is_scalar(plane_indexer[0])
+ ):
+ # we have an equal len list/ndarray
+ # We only get here with len(labels) == len(ilocs) == 1
+ self._setitem_single_column(ilocs[0], value, plane_indexer)
- else:
- # per-label values
- if len(ilocs) != len(value):
- raise ValueError(
- "Must have equal len keys and value "
- "when setting with an iterable"
- )
+ elif lplane_indexer == 0 and len(value) == len(self.obj.index):
+ # We get here in one case via .loc with a all-False mask
+ pass
- for loc, v in zip(ilocs, value):
- self._setitem_single_column(loc, v, plane_indexer)
else:
+ # per-label values
+ if len(ilocs) != len(value):
+ raise ValueError(
+ "Must have equal len keys and value "
+ "when setting with an iterable"
+ )
- # scalar value
- for loc in ilocs:
- self._setitem_single_column(loc, value, plane_indexer)
-
+ for loc, v in zip(ilocs, value):
+ self._setitem_single_column(loc, v, plane_indexer)
else:
- self._setitem_single_block(indexer, value)
+
+ # scalar value
+ for loc in ilocs:
+ self._setitem_single_column(loc, value, plane_indexer)
def _setitem_with_indexer_2d_value(self, indexer, value):
# We get here with np.ndim(value) == 2, excluding DataFrame,
| Splitting this off to get a cleaner diff in an upcoming PR(s) that make setitem_with_indexer go through split_path for _all_ DataFrame cases, giving us less special-casing to worry about. | https://api.github.com/repos/pandas-dev/pandas/pulls/37521 | 2020-10-30T19:44:23Z | 2020-10-31T15:12:34Z | 2020-10-31T15:12:34Z | 2020-10-31T15:18:38Z |
DOC: Start v1.1.5 release notes | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 848121f822383..310857faec436 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -24,6 +24,7 @@ Version 1.1
.. toctree::
:maxdepth: 2
+ v1.1.5
v1.1.4
v1.1.3
v1.1.2
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index fb8687b8ba42c..6353dbfafc9f1 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -52,4 +52,4 @@ Bug fixes
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.1.3..v1.1.4|HEAD
+.. contributors:: v1.1.3..v1.1.4
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
new file mode 100644
index 0000000000000..cf728d94b2a55
--- /dev/null
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -0,0 +1,36 @@
+.. _whatsnew_115:
+
+What's new in 1.1.5 (??)
+------------------------
+
+These are the changes in pandas 1.1.5. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_115.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_115.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_115.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.1.4..v1.1.5|HEAD
| https://api.github.com/repos/pandas-dev/pandas/pulls/37520 | 2020-10-30T15:56:23Z | 2020-10-31T15:13:14Z | 2020-10-31T15:13:14Z | 2020-11-01T16:34:23Z | |
Backport PR #37499 on branch 1.1.x (REGR: fix isin for large series with nan and mixed object dtype (causing regression in read_csv)) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index ad348db21f8c9..fb8687b8ba42c 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :func:`read_csv` raising a ``ValueError`` when ``names`` was of type ``dict_keys`` (:issue:`36928`)
+- Fixed regression in :func:`read_csv` with more than 1M rows and specifying a ``index_col`` argument (:issue:`37094`)
- Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`)
- Fixed regression where :meth:`DataFrame.agg` would fail with :exc:`TypeError` when passed positional arguments to be passed on to the aggregation function (:issue:`36948`).
- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 67ab3a8548f21..48d4fe65942fe 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -440,7 +440,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
if len(comps) > 1_000_000 and not is_object_dtype(comps):
# If the the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
- if np.isnan(values).any():
+ if isna(values).any():
f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c))
else:
f = np.in1d
diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py
index 9f425168540ba..33e24c55d44d9 100644
--- a/pandas/tests/io/parser/test_index_col.py
+++ b/pandas/tests/io/parser/test_index_col.py
@@ -207,3 +207,18 @@ def test_header_with_index_col(all_parsers):
result = parser.read_csv(StringIO(data), index_col="I11", header=0)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_index_col_large_csv(all_parsers):
+ # https://github.com/pandas-dev/pandas/issues/37094
+ parser = all_parsers
+
+ N = 1_000_001
+ df = DataFrame({"a": range(N), "b": np.random.randn(N)})
+
+ with tm.ensure_clean() as path:
+ df.to_csv(path, index=False)
+ result = parser.read_csv(path, index_col=[0])
+
+ tm.assert_frame_equal(result, df.set_index("a"))
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index 62766c692f4df..86ea2b2f02a4d 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -89,3 +89,13 @@ def test_isin_read_only(self):
result = s.isin(arr)
expected = Series([True, True, True])
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_isin_large_series_mixed_dtypes_and_nan():
+ # https://github.com/pandas-dev/pandas/issues/37094
+ # combination of object dtype for the values and > 1_000_000 elements
+ ser = Series([1, 2, np.nan] * 1_000_000)
+ result = ser.isin({"foo", "bar"})
+ expected = Series([False] * 3 * 1_000_000)
+ tm.assert_series_equal(result, expected)
| Backport PR #37499: REGR: fix isin for large series with nan and mixed object dtype (causing regression in read_csv) | https://api.github.com/repos/pandas-dev/pandas/pulls/37517 | 2020-10-30T11:03:40Z | 2020-10-30T11:53:33Z | 2020-10-30T11:53:33Z | 2020-10-30T11:53:34Z |
Backport PR #37508: REGR: inplace Series op not actually operating inplace | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6cb728800dc68..397df3ce96b6b 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -29,6 +29,7 @@ Fixed regressions
- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`)
- Fixed regression in setitem with :meth:`DataFrame.iloc` which raised error when trying to set a value while filtering with a boolean list (:issue:`36741`)
- Fixed regression in :attr:`MultiIndex.is_monotonic_increasing` returning wrong results with ``NaN`` in at least one of the levels (:issue:`37220`)
+- Fixed regression in inplace arithmetic operation on a Series not updating the parent DataFrame (:issue:`36373`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index c60b67fa2f4f6..6a44178e3c704 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -93,8 +93,19 @@ def _wrap_inplace_method(method):
def f(self, other):
result = method(self, other)
+
+ if (
+ self.ndim == 1
+ and result._indexed_same(self)
+ and result.dtype == self.dtype
+ ):
+ # GH#36498 this inplace op can _actually_ be inplace.
+ self._values[:] = result._values
+ return self
+
# Delete cacher
self._reset_cacher()
+
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 166f26f668502..d9c7585d55a1b 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1566,3 +1566,16 @@ def test_arith_reindex_with_duplicates():
result = df1 + df2
expected = pd.DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"])
tm.assert_frame_equal(result, expected)
+
+
+def test_inplace_arithmetic_series_update():
+ # https://github.com/pandas-dev/pandas/issues/36373
+ df = DataFrame({"A": [1, 2, 3]})
+ series = df["A"]
+ vals = series._values
+
+ series += 1
+ assert series._values is vals
+
+ expected = DataFrame({"A": [2, 3, 4]})
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 193800fae751f..b56a92ce71605 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -896,6 +896,7 @@ def test_identity_slice_returns_new_object(self):
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
+ @pytest.mark.xfail(reason="accidental fix reverted - GH37497")
def test_loc_copy_vs_view(self):
# GH 15631
x = DataFrame(zip(range(3), range(3)), columns=["a", "b"])
| Backport of https://github.com/pandas-dev/pandas/pull/37508 | https://api.github.com/repos/pandas-dev/pandas/pulls/37515 | 2020-10-30T09:06:46Z | 2020-10-30T10:24:41Z | 2020-10-30T10:24:41Z | 2020-10-30T10:50:13Z |
Backport PR #37502: REGR: revert behaviour of getitem with assigning with a Series | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6cb728800dc68..7d35e8b12b9b8 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -28,6 +28,7 @@ Fixed regressions
- Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`)
- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`)
- Fixed regression in setitem with :meth:`DataFrame.iloc` which raised error when trying to set a value while filtering with a boolean list (:issue:`36741`)
+- Fixed regression in setitem with a Series getting aligned before setting the values (:issue:`37427`)
- Fixed regression in :attr:`MultiIndex.is_monotonic_increasing` returning wrong results with ``NaN`` in at least one of the levels (:issue:`37220`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b6ff7b33d27cb..18a201674db65 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1037,10 +1037,8 @@ def _set_with_engine(self, key, value):
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
- # extract_array so that if we set e.g. ser[-5:] = ser[:5]
- # we get the first five values, and not 5 NaNs
indexer = self.index._convert_slice_indexer(key, kind="getitem")
- self.iloc[indexer] = extract_array(value, extract_numpy=True)
+ return self._set_values(indexer, value)
else:
assert not isinstance(key, tuple)
@@ -1058,12 +1056,26 @@ def _set_with(self, key, value):
# should be caught by the is_bool_indexer check in __setitem__
if key_type == "integer":
if not self.index._should_fallback_to_positional():
- self.loc[key] = value
+ self._set_labels(key, value)
else:
- self.iloc[key] = value
+ self._set_values(key, value)
else:
self.loc[key] = value
+ def _set_labels(self, key, value):
+ key = com.asarray_tuplesafe(key)
+ indexer: np.ndarray = self.index.get_indexer(key)
+ mask = indexer == -1
+ if mask.any():
+ raise KeyError(f"{key[mask]} not in index")
+ self._set_values(indexer, value)
+
+ def _set_values(self, key, value):
+ if isinstance(key, Series):
+ key = key._values
+ self._mgr = self._mgr.setitem(indexer=key, value=value)
+ self._maybe_update_cacher()
+
def _set_value(self, label, value, takeable: bool = False):
"""
Quickly set single value at passed label.
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 6b7cda89a4714..cf03dfb8ca9b7 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -137,3 +137,13 @@ def test_getitem_ndim_deprecated():
s = pd.Series([0, 1])
with tm.assert_produces_warning(FutureWarning):
s[:, None]
+
+
+def test_getitem_assignment_series_aligment():
+ # https://github.com/pandas-dev/pandas/issues/37427
+ # with getitem, when assigning with a Series, it is not first aligned
+ s = Series(range(10))
+ idx = np.array([2, 4, 9])
+ s[idx] = Series([10, 11, 12])
+ expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12])
+ tm.assert_series_equal(s, expected)
| Backport of https://github.com/pandas-dev/pandas/pull/37502 | https://api.github.com/repos/pandas-dev/pandas/pulls/37514 | 2020-10-30T08:42:39Z | 2020-10-30T09:35:45Z | 2020-10-30T09:35:45Z | 2020-10-30T09:35:48Z |
TST: setting value at MultiIndex slice using .loc | diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 8fb418ab78307..eb84f771204f6 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1062,6 +1062,24 @@ def test_loc_getitem_access_none_value_in_multiindex(self):
result = ser.loc[("Level1", "Level2_a")]
assert result == 1
+ def test_loc_setitem_multiindex_slice(self):
+ # GH 34870
+
+ index = pd.MultiIndex.from_tuples(
+ zip(
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ),
+ names=["first", "second"],
+ )
+
+ result = Series([1, 1, 1, 1, 1, 1, 1, 1], index=index)
+ result.loc[("baz", "one"):("foo", "two")] = 100
+
+ expected = Series([1, 1, 100, 100, 100, 100, 1, 1], index=index)
+
+ tm.assert_series_equal(result, expected)
+
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
| - [x] closes #34870
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37513 | 2020-10-30T03:38:24Z | 2020-10-31T17:19:06Z | 2020-10-31T17:19:06Z | 2020-10-31T17:19:09Z |
CLN: remove _vendored/typing_extensions | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 315aeb6423254..d9e48ab9b6189 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -104,13 +104,13 @@ repos:
language: python
entry: python scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module"
types: [python]
- exclude: ^(asv_bench|pandas/_vendored|pandas/tests|doc)/
+ exclude: ^(asv_bench|pandas/tests|doc)/
- id: unwanted-patterns-private-function-across-module
name: Check for use of private functions across modules
language: python
entry: python scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module"
types: [python]
- exclude: ^(asv_bench|pandas/_vendored|pandas/tests|doc)/
+ exclude: ^(asv_bench|pandas/tests|doc)/
- repo: https://github.com/asottile/yesqa
rev: v1.2.2
hooks:
diff --git a/Makefile b/Makefile
index b915d8840cd8d..4f71df51de360 100644
--- a/Makefile
+++ b/Makefile
@@ -30,11 +30,11 @@ check:
python3 scripts/validate_unwanted_patterns.py \
--validation-type="private_function_across_module" \
--included-file-extensions="py" \
- --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored \
+ --excluded-file-paths=pandas/tests,asv_bench/ \
pandas/
python3 scripts/validate_unwanted_patterns.py \
--validation-type="private_import_across_module" \
--included-file-extensions="py" \
- --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/
+ --excluded-file-paths=pandas/tests,asv_bench/,doc/
pandas/
diff --git a/pandas/_vendored/__init__.py b/pandas/_vendored/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py
deleted file mode 100644
index 6efbbe9302952..0000000000000
--- a/pandas/_vendored/typing_extensions.py
+++ /dev/null
@@ -1,2465 +0,0 @@
-"""
-vendored copy of typing_extensions, copied from
-https://raw.githubusercontent.com/python/typing/master/typing_extensions/src_py3/typing_extensions.py
-
-on 2020-08-30.
-
-typing_extensions is distributed under the Python Software Foundation License.
-
-This is not a direct copy/paste of the original file. Changes are:
- - this docstring
- - ran `black`
- - ran `isort`
- - edited strings split by black to adhere to pandas style conventions
- - AsyncContextManager is defined without `exec`
- - python2-style super usages are updated
- - replace foo[dot]__class__ with type(foo)
- - Change a comment-syntax annotation in a docstring to newer syntax
-"""
-
-# These are used by Protocol implementation
-# We use internal typing helpers here, but this significantly reduces
-# code duplication. (Also this is only until Protocol is in typing.)
-import abc
-import collections
-import collections.abc as collections_abc
-import contextlib
-import operator
-import sys
-import typing
-from typing import Callable, Generic, Tuple, TypeVar
-
-# After PEP 560, internal typing API was substantially reworked.
-# This is especially important for Protocol class which uses internal APIs
-# quite extensivelly.
-PEP_560 = sys.version_info[:3] >= (3, 7, 0)
-
-if PEP_560:
- GenericMeta = TypingMeta = type
-else:
- from typing import GenericMeta, TypingMeta
-OLD_GENERICS = False
-try:
- from typing import _next_in_mro, _type_check, _type_vars
-except ImportError:
- OLD_GENERICS = True
-try:
- from typing import _subs_tree # noqa
-
- SUBS_TREE = True
-except ImportError:
- SUBS_TREE = False
-try:
- from typing import _tp_cache
-except ImportError:
-
- def _tp_cache(x):
- return x
-
-
-try:
- from typing import _TypingEllipsis, _TypingEmpty
-except ImportError:
-
- class _TypingEllipsis:
- pass
-
- class _TypingEmpty:
- pass
-
-
-# The two functions below are copies of typing internal helpers.
-# They are needed by _ProtocolMeta
-
-
-def _no_slots_copy(dct):
- dict_copy = dict(dct)
- if "__slots__" in dict_copy:
- for slot in dict_copy["__slots__"]:
- dict_copy.pop(slot, None)
- return dict_copy
-
-
-def _check_generic(cls, parameters):
- if not cls.__parameters__:
- raise TypeError("%s is not a generic class" % repr(cls))
- alen = len(parameters)
- elen = len(cls.__parameters__)
- if alen != elen:
- raise TypeError(
- "Too %s parameters for %s; actual %s, expected %s"
- % ("many" if alen > elen else "few", repr(cls), alen, elen)
- )
-
-
-if hasattr(typing, "_generic_new"):
- _generic_new = typing._generic_new
-else:
- # Note: The '_generic_new(...)' function is used as a part of the
- # process of creating a generic type and was added to the typing module
- # as of Python 3.5.3.
- #
- # We've defined '_generic_new(...)' below to exactly match the behavior
- # implemented in older versions of 'typing' bundled with Python 3.5.0 to
- # 3.5.2. This helps eliminate redundancy when defining collection types
- # like 'Deque' later.
- #
- # See https://github.com/python/typing/pull/308 for more details -- in
- # particular, compare and contrast the definition of types like
- # 'typing.List' before and after the merge.
-
- def _generic_new(base_cls, cls, *args, **kwargs):
- return base_cls.__new__(cls, *args, **kwargs)
-
-
-# See https://github.com/python/typing/pull/439
-if hasattr(typing, "_geqv"):
- from typing import _geqv
-
- _geqv_defined = True
-else:
- _geqv = None
- _geqv_defined = False
-
-if sys.version_info[:2] >= (3, 6):
- import _collections_abc
-
- _check_methods_in_mro = _collections_abc._check_methods
-else:
-
- def _check_methods_in_mro(C, *methods):
- mro = C.__mro__
- for method in methods:
- for B in mro:
- if method in B.__dict__:
- if B.__dict__[method] is None:
- return NotImplemented
- break
- else:
- return NotImplemented
- return True
-
-
-# Please keep __all__ alphabetized within each category.
-__all__ = [
- # Super-special typing primitives.
- "ClassVar",
- "Final",
- "Type",
- # ABCs (from collections.abc).
- # The following are added depending on presence
- # of their non-generic counterparts in stdlib:
- # 'Awaitable',
- # 'AsyncIterator',
- # 'AsyncIterable',
- # 'Coroutine',
- # 'AsyncGenerator',
- # 'AsyncContextManager',
- # 'ChainMap',
- # Concrete collection types.
- "ContextManager",
- "Counter",
- "Deque",
- "DefaultDict",
- "TypedDict",
- # Structural checks, a.k.a. protocols.
- "SupportsIndex",
- # One-off things.
- "final",
- "IntVar",
- "Literal",
- "NewType",
- "overload",
- "Text",
- "TYPE_CHECKING",
-]
-
-# Annotated relies on substitution trees of pep 560. It will not work for
-# versions of typing older than 3.5.3
-HAVE_ANNOTATED = PEP_560 or SUBS_TREE
-
-if PEP_560:
- __all__.extend(["get_args", "get_origin", "get_type_hints"])
-
-if HAVE_ANNOTATED:
- __all__.append("Annotated")
-
-# Protocols are hard to backport to the original version of typing 3.5.0
-HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0)
-
-if HAVE_PROTOCOLS:
- __all__.extend(["Protocol", "runtime", "runtime_checkable"])
-
-
-# TODO
-if hasattr(typing, "NoReturn"):
- NoReturn = typing.NoReturn
-elif hasattr(typing, "_FinalTypingBase"):
-
- class _NoReturn(typing._FinalTypingBase, _root=True):
- """Special type indicating functions that never return.
- Example::
-
- from typing import NoReturn
-
- def stop() -> NoReturn:
- raise Exception('no way')
-
- This type is invalid in other positions, e.g., ``List[NoReturn]``
- will fail in static type checkers.
- """
-
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("NoReturn cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("NoReturn cannot be used with issubclass().")
-
- NoReturn = _NoReturn(_root=True)
-else:
-
- class _NoReturnMeta(typing.TypingMeta):
- """Metaclass for NoReturn"""
-
- def __new__(cls, name, bases, namespace, _root=False):
- return super().__new__(cls, name, bases, namespace, _root=_root)
-
- def __instancecheck__(self, obj):
- raise TypeError("NoReturn cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("NoReturn cannot be used with issubclass().")
-
- class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True):
- """Special type indicating functions that never return.
- Example::
-
- from typing import NoReturn
-
- def stop() -> NoReturn:
- raise Exception('no way')
-
- This type is invalid in other positions, e.g., ``List[NoReturn]``
- will fail in static type checkers.
- """
-
- __slots__ = ()
-
-
-# Some unconstrained type variables. These are used by the container types.
-# (These are not for export.)
-T = typing.TypeVar("T") # Any type.
-KT = typing.TypeVar("KT") # Key type.
-VT = typing.TypeVar("VT") # Value type.
-T_co = typing.TypeVar("T_co", covariant=True) # Any type covariant containers.
-V_co = typing.TypeVar("V_co", covariant=True) # Any type covariant containers.
-VT_co = typing.TypeVar("VT_co", covariant=True) # Value type covariant containers.
-T_contra = typing.TypeVar("T_contra", contravariant=True) # Ditto contravariant.
-
-
-if hasattr(typing, "ClassVar"):
- ClassVar = typing.ClassVar
-elif hasattr(typing, "_FinalTypingBase"):
-
- class _ClassVar(typing._FinalTypingBase, _root=True):
- """Special type construct to mark class variables.
-
- An annotation wrapped in ClassVar indicates that a given
- attribute is intended to be used as a class variable and
- should not be set on instances of that class. Usage::
-
- class Starship:
- stats: ClassVar[Dict[str, int]] = {} # class variable
- damage: int = 10 # instance variable
-
- ClassVar accepts only types and cannot be further subscribed.
-
- Note that ClassVar is not a class itself, and should not
- be used with isinstance() or issubclass().
- """
-
- __slots__ = ("__type__",)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(
- typing._type_check(
- item, "{} accepts only single type.".format(cls.__name__[1:])
- ),
- _root=True,
- )
- raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:]))
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += "[{}]".format(typing._type_repr(self.__type__))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, _ClassVar):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- ClassVar = _ClassVar(_root=True)
-else:
-
- class _ClassVarMeta(typing.TypingMeta):
- """Metaclass for ClassVar"""
-
- def __new__(cls, name, bases, namespace, tp=None, _root=False):
- self = super().__new__(cls, name, bases, namespace, _root=_root)
- if tp is not None:
- self.__type__ = tp
- return self
-
- def __instancecheck__(self, obj):
- raise TypeError("ClassVar cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("ClassVar cannot be used with issubclass().")
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is not None:
- raise TypeError(
- "{} cannot be further subscripted".format(cls.__name__[1:])
- )
-
- param = typing._type_check(
- item, "{} accepts only single type.".format(cls.__name__[1:])
- )
- return cls(
- self.__name__, self.__bases__, dict(self.__dict__), tp=param, _root=True
- )
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(
- self.__name__,
- self.__bases__,
- dict(self.__dict__),
- tp=self.__type__,
- _root=True,
- )
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += "[{}]".format(typing._type_repr(self.__type__))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, ClassVar):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True):
- """Special type construct to mark class variables.
-
- An annotation wrapped in ClassVar indicates that a given
- attribute is intended to be used as a class variable and
- should not be set on instances of that class. Usage::
-
- class Starship:
- stats: ClassVar[Dict[str, int]] = {} # class variable
- damage: int = 10 # instance variable
-
- ClassVar accepts only types and cannot be further subscribed.
-
- Note that ClassVar is not a class itself, and should not
- be used with isinstance() or issubclass().
- """
-
- __type__ = None
-
-
-# On older versions of typing there is an internal class named "Final".
-if hasattr(typing, "Final") and sys.version_info[:2] >= (3, 7):
- Final = typing.Final
-elif sys.version_info[:2] >= (3, 7):
-
- class _FinalForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return "typing_extensions." + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(
- parameters, f"{self._name} accepts only single type"
- )
- return _GenericAlias(self, (item,))
-
- Final = _FinalForm(
- "Final",
- doc="""A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.""",
- )
-elif hasattr(typing, "_FinalTypingBase"):
-
- class _Final(typing._FinalTypingBase, _root=True):
- """A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.
- """
-
- __slots__ = ("__type__",)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(
- typing._type_check(
- item, "{} accepts only single type.".format(cls.__name__[1:])
- ),
- _root=True,
- )
- raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:]))
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += "[{}]".format(typing._type_repr(self.__type__))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, _Final):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- Final = _Final(_root=True)
-else:
-
- class _FinalMeta(typing.TypingMeta):
- """Metaclass for Final"""
-
- def __new__(cls, name, bases, namespace, tp=None, _root=False):
- self = super().__new__(cls, name, bases, namespace, _root=_root)
- if tp is not None:
- self.__type__ = tp
- return self
-
- def __instancecheck__(self, obj):
- raise TypeError("Final cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Final cannot be used with issubclass().")
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is not None:
- raise TypeError(
- "{} cannot be further subscripted".format(cls.__name__[1:])
- )
-
- param = typing._type_check(
- item, "{} accepts only single type.".format(cls.__name__[1:])
- )
- return cls(
- self.__name__, self.__bases__, dict(self.__dict__), tp=param, _root=True
- )
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(
- self.__name__,
- self.__bases__,
- dict(self.__dict__),
- tp=self.__type__,
- _root=True,
- )
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += "[{}]".format(typing._type_repr(self.__type__))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, Final):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- class Final(typing.Final, metaclass=_FinalMeta, _root=True):
- """A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.
- """
-
- __type__ = None
-
-
-if hasattr(typing, "final"):
- final = typing.final
-else:
-
- def final(f):
- """This decorator can be used to indicate to type checkers that
- the decorated method cannot be overridden, and decorated class
- cannot be subclassed. For example:
-
- class Base:
- @final
- def done(self) -> None:
- ...
- class Sub(Base):
- def done(self) -> None: # Error reported by type checker
- ...
- @final
- class Leaf:
- ...
- class Other(Leaf): # Error reported by type checker
- ...
-
- There is no runtime checking of these properties.
- """
- return f
-
-
-def IntVar(name):
- return TypeVar(name)
-
-
-if hasattr(typing, "Literal"):
- Literal = typing.Literal
-elif sys.version_info[:2] >= (3, 7):
-
- class _LiteralForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return "typing_extensions." + self._name
-
- def __getitem__(self, parameters):
- return _GenericAlias(self, parameters)
-
- Literal = _LiteralForm(
- "Literal",
- doc="""A type that can be used to indicate to type checkers
- that the corresponding value has a value literally equivalent
- to the provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to
- the value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime
- checking verifying that the parameter is actually a value
- instead of a type.""",
- )
-elif hasattr(typing, "_FinalTypingBase"):
-
- class _Literal(typing._FinalTypingBase, _root=True):
- """A type that can be used to indicate to type checkers that the
- corresponding value has a value literally equivalent to the
- provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to the
- value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime checking
- verifying that the parameter is actually a value instead of a type.
- """
-
- __slots__ = ("__values__",)
-
- def __init__(self, values=None, **kwds):
- self.__values__ = values
-
- def __getitem__(self, values):
- cls = type(self)
- if self.__values__ is None:
- if not isinstance(values, tuple):
- values = (values,)
- return cls(values, _root=True)
- raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:]))
-
- def _eval_type(self, globalns, localns):
- return self
-
- def __repr__(self):
- r = super().__repr__()
- if self.__values__ is not None:
- r += "[{}]".format(", ".join(map(typing._type_repr, self.__values__)))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__values__))
-
- def __eq__(self, other):
- if not isinstance(other, _Literal):
- return NotImplemented
- if self.__values__ is not None:
- return self.__values__ == other.__values__
- return self is other
-
- Literal = _Literal(_root=True)
-else:
-
- class _LiteralMeta(typing.TypingMeta):
- """Metaclass for Literal"""
-
- def __new__(cls, name, bases, namespace, values=None, _root=False):
- self = super().__new__(cls, name, bases, namespace, _root=_root)
- if values is not None:
- self.__values__ = values
- return self
-
- def __instancecheck__(self, obj):
- raise TypeError("Literal cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Literal cannot be used with issubclass().")
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__values__ is not None:
- raise TypeError(
- "{} cannot be further subscripted".format(cls.__name__[1:])
- )
-
- if not isinstance(item, tuple):
- item = (item,)
- return cls(
- self.__name__,
- self.__bases__,
- dict(self.__dict__),
- values=item,
- _root=True,
- )
-
- def _eval_type(self, globalns, localns):
- return self
-
- def __repr__(self):
- r = super().__repr__()
- if self.__values__ is not None:
- r += "[{}]".format(", ".join(map(typing._type_repr, self.__values__)))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__values__))
-
- def __eq__(self, other):
- if not isinstance(other, Literal):
- return NotImplemented
- if self.__values__ is not None:
- return self.__values__ == other.__values__
- return self is other
-
- class Literal(typing.Final, metaclass=_LiteralMeta, _root=True):
- """A type that can be used to indicate to type checkers that the
- corresponding value has a value literally equivalent to the
- provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to the
- value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime checking
- verifying that the parameter is actually a value instead of a type.
- """
-
- __values__ = None
-
-
-def _overload_dummy(*args, **kwds):
- """Helper for @overload to raise when called."""
- raise NotImplementedError(
- "You should not call an overloaded function. "
- "A series of @overload-decorated functions "
- "outside a stub module should always be followed "
- "by an implementation that is not @overload-ed."
- )
-
-
-def overload(func):
- """Decorator for overloaded functions/methods.
-
- In a stub file, place two or more stub definitions for the same
- function in a row, each decorated with @overload. For example:
-
- @overload
- def utf8(value: None) -> None: ...
- @overload
- def utf8(value: bytes) -> bytes: ...
- @overload
- def utf8(value: str) -> bytes: ...
-
- In a non-stub file (i.e. a regular .py file), do the same but
- follow it with an implementation. The implementation should *not*
- be decorated with @overload. For example:
-
- @overload
- def utf8(value: None) -> None: ...
- @overload
- def utf8(value: bytes) -> bytes: ...
- @overload
- def utf8(value: str) -> bytes: ...
- def utf8(value):
- # implementation goes here
- """
- return _overload_dummy
-
-
-# This is not a real generic class. Don't use outside annotations.
-if hasattr(typing, "Type"):
- Type = typing.Type
-else:
- # Internal type variable used for Type[].
- CT_co = typing.TypeVar("CT_co", covariant=True, bound=type)
-
- class Type(typing.Generic[CT_co], extra=type):
- """A special construct usable to annotate class objects.
-
- For example, suppose we have the following classes::
-
- class User: ... # Abstract base for User classes
- class BasicUser(User): ...
- class ProUser(User): ...
- class TeamUser(User): ...
-
- And a function that takes a class argument that's a subclass of
- User and returns an instance of the corresponding class::
-
- U = TypeVar('U', bound=User)
- def new_user(user_class: Type[U]) -> U:
- user = user_class()
- # (Here we could write the user object to a database)
- return user
- joe = new_user(BasicUser)
-
- At this point the type checker knows that joe has type BasicUser.
- """
-
- __slots__ = ()
-
-
-# Various ABCs mimicking those in collections.abc.
-# A few are simply re-exported for completeness.
-
-
-def _define_guard(type_name):
- """
- Returns True if the given type isn't defined in typing but
- is defined in collections_abc.
-
- Adds the type to __all__ if the collection is found in either
- typing or collection_abc.
- """
- if hasattr(typing, type_name):
- __all__.append(type_name)
- globals()[type_name] = getattr(typing, type_name)
- return False
- elif hasattr(collections_abc, type_name):
- __all__.append(type_name)
- return True
- else:
- return False
-
-
-class _ExtensionsGenericMeta(GenericMeta):
- def __subclasscheck__(self, subclass):
- """This mimics a more modern GenericMeta.__subclasscheck__() logic
- (that does not have problems with recursion) to work around interactions
- between collections, typing, and typing_extensions on older
- versions of Python, see https://github.com/python/typing/issues/501.
- """
- if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0):
- if self.__origin__ is not None:
- if sys._getframe(1).f_globals["__name__"] not in ["abc", "functools"]:
- raise TypeError(
- "Parameterized generics cannot be used with class "
- "or instance checks"
- )
- return False
- if not self.__extra__:
- return super().__subclasscheck__(subclass)
- res = self.__extra__.__subclasshook__(subclass)
- if res is not NotImplemented:
- return res
- if self.__extra__ in subclass.__mro__:
- return True
- for scls in self.__extra__.__subclasses__():
- if isinstance(scls, GenericMeta):
- continue
- if issubclass(subclass, scls):
- return True
- return False
-
-
-if _define_guard("Awaitable"):
-
- class Awaitable(
- typing.Generic[T_co],
- metaclass=_ExtensionsGenericMeta,
- extra=collections_abc.Awaitable,
- ):
- __slots__ = ()
-
-
-if _define_guard("Coroutine"):
-
- class Coroutine(
- Awaitable[V_co],
- typing.Generic[T_co, T_contra, V_co],
- metaclass=_ExtensionsGenericMeta,
- extra=collections_abc.Coroutine,
- ):
- __slots__ = ()
-
-
-if _define_guard("AsyncIterable"):
-
- class AsyncIterable(
- typing.Generic[T_co],
- metaclass=_ExtensionsGenericMeta,
- extra=collections_abc.AsyncIterable,
- ):
- __slots__ = ()
-
-
-if _define_guard("AsyncIterator"):
-
- class AsyncIterator(
- AsyncIterable[T_co],
- metaclass=_ExtensionsGenericMeta,
- extra=collections_abc.AsyncIterator,
- ):
- __slots__ = ()
-
-
-if hasattr(typing, "Deque"):
- Deque = typing.Deque
-elif _geqv_defined:
-
- class Deque(
- collections.deque,
- typing.MutableSequence[T],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.deque,
- ):
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if _geqv(cls, Deque):
- return collections.deque(*args, **kwds)
- return _generic_new(collections.deque, cls, *args, **kwds)
-
-
-else:
-
- class Deque(
- collections.deque,
- typing.MutableSequence[T],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.deque,
- ):
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is Deque:
- return collections.deque(*args, **kwds)
- return _generic_new(collections.deque, cls, *args, **kwds)
-
-
-if hasattr(typing, "ContextManager"):
- ContextManager = typing.ContextManager
-elif hasattr(contextlib, "AbstractContextManager"):
-
- class ContextManager(
- typing.Generic[T_co],
- metaclass=_ExtensionsGenericMeta,
- extra=contextlib.AbstractContextManager,
- ):
- __slots__ = ()
-
-
-else:
-
- class ContextManager(typing.Generic[T_co]):
- __slots__ = ()
-
- def __enter__(self):
- return self
-
- @abc.abstractmethod
- def __exit__(self, exc_type, exc_value, traceback):
- return None
-
- @classmethod
- def __subclasshook__(cls, C):
- if cls is ContextManager:
- # In Python 3.6+, it is possible to set a method to None to
- # explicitly indicate that the class does not implement an ABC
- # (https://bugs.python.org/issue25958), but we do not support
- # that pattern here because this fallback class is only used
- # in Python 3.5 and earlier.
- if any("__enter__" in B.__dict__ for B in C.__mro__) and any(
- "__exit__" in B.__dict__ for B in C.__mro__
- ):
- return True
- return NotImplemented
-
-
-if hasattr(typing, "AsyncContextManager"):
- AsyncContextManager = typing.AsyncContextManager
- __all__.append("AsyncContextManager")
-elif hasattr(contextlib, "AbstractAsyncContextManager"):
-
- class AsyncContextManager(
- typing.Generic[T_co],
- metaclass=_ExtensionsGenericMeta,
- extra=contextlib.AbstractAsyncContextManager,
- ):
- __slots__ = ()
-
- __all__.append("AsyncContextManager")
-
-else:
-
- class AsyncContextManager(typing.Generic[T_co]):
- __slots__ = ()
-
- async def __aenter__(self):
- return self
-
- @abc.abstractmethod
- async def __aexit__(self, exc_type, exc_value, traceback):
- return None
-
- @classmethod
- def __subclasshook__(cls, C):
- if cls is AsyncContextManager:
- return _check_methods_in_mro(C, "__aenter__", "__aexit__")
- return NotImplemented
-
- __all__.append("AsyncContextManager")
-
-
-if hasattr(typing, "DefaultDict"):
- DefaultDict = typing.DefaultDict
-elif _geqv_defined:
-
- class DefaultDict(
- collections.defaultdict,
- typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.defaultdict,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if _geqv(cls, DefaultDict):
- return collections.defaultdict(*args, **kwds)
- return _generic_new(collections.defaultdict, cls, *args, **kwds)
-
-
-else:
-
- class DefaultDict(
- collections.defaultdict,
- typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.defaultdict,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is DefaultDict:
- return collections.defaultdict(*args, **kwds)
- return _generic_new(collections.defaultdict, cls, *args, **kwds)
-
-
-if hasattr(typing, "Counter"):
- Counter = typing.Counter
-elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1):
- assert _geqv_defined
- _TInt = typing.TypeVar("_TInt")
-
- class _CounterMeta(typing.GenericMeta):
- """Metaclass for Counter"""
-
- def __getitem__(self, item):
- return super().__getitem__((item, int))
-
- class Counter(
- collections.Counter,
- typing.Dict[T, int],
- metaclass=_CounterMeta,
- extra=collections.Counter,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if _geqv(cls, Counter):
- return collections.Counter(*args, **kwds)
- return _generic_new(collections.Counter, cls, *args, **kwds)
-
-
-elif _geqv_defined:
-
- class Counter(
- collections.Counter,
- typing.Dict[T, int],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.Counter,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if _geqv(cls, Counter):
- return collections.Counter(*args, **kwds)
- return _generic_new(collections.Counter, cls, *args, **kwds)
-
-
-else:
-
- class Counter(
- collections.Counter,
- typing.Dict[T, int],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.Counter,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is Counter:
- return collections.Counter(*args, **kwds)
- return _generic_new(collections.Counter, cls, *args, **kwds)
-
-
-if hasattr(typing, "ChainMap"):
- ChainMap = typing.ChainMap
- __all__.append("ChainMap")
-elif hasattr(collections, "ChainMap"):
- # ChainMap only exists in 3.3+
- if _geqv_defined:
-
- class ChainMap(
- collections.ChainMap,
- typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.ChainMap,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if _geqv(cls, ChainMap):
- return collections.ChainMap(*args, **kwds)
- return _generic_new(collections.ChainMap, cls, *args, **kwds)
-
- else:
-
- class ChainMap(
- collections.ChainMap,
- typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.ChainMap,
- ):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is ChainMap:
- return collections.ChainMap(*args, **kwds)
- return _generic_new(collections.ChainMap, cls, *args, **kwds)
-
- __all__.append("ChainMap")
-
-
-if _define_guard("AsyncGenerator"):
-
- class AsyncGenerator(
- AsyncIterator[T_co],
- typing.Generic[T_co, T_contra],
- metaclass=_ExtensionsGenericMeta,
- extra=collections_abc.AsyncGenerator,
- ):
- __slots__ = ()
-
-
-if hasattr(typing, "NewType"):
- NewType = typing.NewType
-else:
-
- def NewType(name, tp):
- """NewType creates simple unique types with almost zero
- runtime overhead. NewType(name, tp) is considered a subtype of tp
- by static type checkers. At runtime, NewType(name, tp) returns
- a dummy function that simply returns its argument. Usage::
-
- UserId = NewType('UserId', int)
-
- def name_by_id(user_id: UserId) -> str:
- ...
-
- UserId('user') # Fails type check
-
- name_by_id(42) # Fails type check
- name_by_id(UserId(42)) # OK
-
- num: int = UserId(5) + 1
- """
-
- def new_type(x):
- return x
-
- new_type.__name__ = name
- new_type.__supertype__ = tp
- return new_type
-
-
-if hasattr(typing, "Text"):
- Text = typing.Text
-else:
- Text = str
-
-
-if hasattr(typing, "TYPE_CHECKING"):
- TYPE_CHECKING = typing.TYPE_CHECKING
-else:
- # Constant that's True when type checking, but False here.
- TYPE_CHECKING = False
-
-
-def _gorg(cls):
- """This function exists for compatibility with old typing versions."""
- assert isinstance(cls, GenericMeta)
- if hasattr(cls, "_gorg"):
- return cls._gorg
- while cls.__origin__ is not None:
- cls = cls.__origin__
- return cls
-
-
-if OLD_GENERICS:
-
- def _next_in_mro(cls): # noqa
- """This function exists for compatibility with old typing versions."""
- next_in_mro = object
- for i, c in enumerate(cls.__mro__[:-1]):
- if isinstance(c, GenericMeta) and _gorg(c) is Generic:
- next_in_mro = cls.__mro__[i + 1]
- return next_in_mro
-
-
-_PROTO_WHITELIST = [
- "Callable",
- "Awaitable",
- "Iterable",
- "Iterator",
- "AsyncIterable",
- "AsyncIterator",
- "Hashable",
- "Sized",
- "Container",
- "Collection",
- "Reversible",
- "ContextManager",
- "AsyncContextManager",
-]
-
-
-def _get_protocol_attrs(cls):
- attrs = set()
- for base in cls.__mro__[:-1]: # without object
- if base.__name__ in ("Protocol", "Generic"):
- continue
- annotations = getattr(base, "__annotations__", {})
- for attr in list(base.__dict__.keys()) + list(annotations.keys()):
- if not attr.startswith("_abc_") and attr not in (
- "__abstractmethods__",
- "__annotations__",
- "__weakref__",
- "_is_protocol",
- "_is_runtime_protocol",
- "__dict__",
- "__args__",
- "__slots__",
- "__next_in_mro__",
- "__parameters__",
- "__origin__",
- "__orig_bases__",
- "__extra__",
- "__tree_hash__",
- "__doc__",
- "__subclasshook__",
- "__init__",
- "__new__",
- "__module__",
- "_MutableMapping__marker",
- "_gorg",
- ):
- attrs.add(attr)
- return attrs
-
-
-def _is_callable_members_only(cls):
- return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
-
-
-if hasattr(typing, "Protocol"):
- Protocol = typing.Protocol
-elif HAVE_PROTOCOLS and not PEP_560:
-
- class _ProtocolMeta(GenericMeta):
- """Internal metaclass for Protocol.
-
- This exists so Protocol classes can be generic without deriving
- from Generic.
- """
-
- if not OLD_GENERICS:
-
- def __new__(
- cls,
- name,
- bases,
- namespace,
- tvars=None,
- args=None,
- origin=None,
- extra=None,
- orig_bases=None,
- ):
- # This is just a version copied from GenericMeta.__new__ that
- # includes "Protocol" special treatment. (Comments removed for brevity.)
- assert extra is None # Protocols should not have extra
- if tvars is not None:
- assert origin is not None
- assert all(isinstance(t, TypeVar) for t in tvars), tvars
- else:
- tvars = _type_vars(bases)
- gvars = None
- for base in bases:
- if base is Generic:
- raise TypeError("Cannot inherit from plain Generic")
- if isinstance(base, GenericMeta) and base.__origin__ in (
- Generic,
- Protocol,
- ):
- if gvars is not None:
- raise TypeError(
- "Cannot inherit from Generic[...] or "
- "Protocol[...] multiple times."
- )
- gvars = base.__parameters__
- if gvars is None:
- gvars = tvars
- else:
- tvarset = set(tvars)
- gvarset = set(gvars)
- if not tvarset <= gvarset:
- raise TypeError(
- "Some type variables (%s) "
- "are not listed in %s[%s]"
- % (
- ", ".join(
- str(t) for t in tvars if t not in gvarset
- ),
- "Generic"
- if any(b.__origin__ is Generic for b in bases)
- else "Protocol",
- ", ".join(str(g) for g in gvars),
- )
- )
- tvars = gvars
-
- initial_bases = bases
- if (
- extra is not None
- and type(extra) is abc.ABCMeta
- and extra not in bases
- ):
- bases = (extra,) + bases
- bases = tuple(
- _gorg(b) if isinstance(b, GenericMeta) else b for b in bases
- )
- if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
- bases = tuple(b for b in bases if b is not Generic)
- namespace.update({"__origin__": origin, "__extra__": extra})
- self = super().__new__(cls, name, bases, namespace, _root=True)
- super().__setattr__("_gorg", self if not origin else _gorg(origin))
- self.__parameters__ = tvars
- self.__args__ = (
- tuple(
- ... if a is _TypingEllipsis else () if a is _TypingEmpty else a
- for a in args
- )
- if args
- else None
- )
- self.__next_in_mro__ = _next_in_mro(self)
- if orig_bases is None:
- self.__orig_bases__ = initial_bases
- elif origin is not None:
- self._abc_registry = origin._abc_registry
- self._abc_cache = origin._abc_cache
- if hasattr(self, "_subs_tree"):
- self.__tree_hash__ = (
- hash(self._subs_tree()) if origin else super().__hash__()
- )
- return self
-
- def __init__(cls, *args, **kwargs):
- super().__init__(*args, **kwargs)
- if not cls.__dict__.get("_is_protocol", None):
- cls._is_protocol = any(
- b is Protocol
- or isinstance(b, _ProtocolMeta)
- and b.__origin__ is Protocol
- for b in cls.__bases__
- )
- if cls._is_protocol:
- for base in cls.__mro__[1:]:
- if not (
- base in (object, Generic)
- or base.__module__ == "collections.abc"
- and base.__name__ in _PROTO_WHITELIST
- or isinstance(base, TypingMeta)
- and base._is_protocol
- or isinstance(base, GenericMeta)
- and base.__origin__ is Generic
- ):
- raise TypeError(
- "Protocols can only inherit from other "
- "protocols, got %r" % base
- )
-
- def _no_init(self, *args, **kwargs):
- if type(self)._is_protocol:
- raise TypeError("Protocols cannot be instantiated")
-
- cls.__init__ = _no_init
-
- def _proto_hook(other):
- if not cls.__dict__.get("_is_protocol", None):
- return NotImplemented
- if not isinstance(other, type):
- # Same error as for issubclass(1, int)
- raise TypeError("issubclass() arg 1 must be a class")
- for attr in _get_protocol_attrs(cls):
- for base in other.__mro__:
- if attr in base.__dict__:
- if base.__dict__[attr] is None:
- return NotImplemented
- break
- annotations = getattr(base, "__annotations__", {})
- if (
- isinstance(annotations, typing.Mapping)
- and attr in annotations
- and isinstance(other, _ProtocolMeta)
- and other._is_protocol
- ):
- break
- else:
- return NotImplemented
- return True
-
- if "__subclasshook__" not in cls.__dict__:
- cls.__subclasshook__ = _proto_hook
-
- def __instancecheck__(self, instance):
- # We need this method for situations where attributes are
- # assigned in __init__.
- if (
- not getattr(self, "_is_protocol", False)
- or _is_callable_members_only(self)
- ) and issubclass(type(instance), self):
- return True
- if self._is_protocol:
- if all(
- hasattr(instance, attr)
- and (
- not callable(getattr(self, attr, None))
- or getattr(instance, attr) is not None
- )
- for attr in _get_protocol_attrs(self)
- ):
- return True
- return super().__instancecheck__(instance)
-
- def __subclasscheck__(self, cls):
- if self.__origin__ is not None:
- if sys._getframe(1).f_globals["__name__"] not in ["abc", "functools"]:
- raise TypeError(
- "Parameterized generics cannot be used with class "
- "or instance checks"
- )
- return False
- if self.__dict__.get("_is_protocol", None) and not self.__dict__.get(
- "_is_runtime_protocol", None
- ):
- if sys._getframe(1).f_globals["__name__"] in [
- "abc",
- "functools",
- "typing",
- ]:
- return False
- raise TypeError(
- "Instance and class checks can only be used with "
- "@runtime protocols"
- )
- if self.__dict__.get(
- "_is_runtime_protocol", None
- ) and not _is_callable_members_only(self):
- if sys._getframe(1).f_globals["__name__"] in [
- "abc",
- "functools",
- "typing",
- ]:
- return super().__subclasscheck__(cls)
- raise TypeError(
- "Protocols with non-method members don't support issubclass()"
- )
- return super().__subclasscheck__(cls)
-
- if not OLD_GENERICS:
-
- @_tp_cache
- def __getitem__(self, params):
- # We also need to copy this from GenericMeta.__getitem__ to get
- # special treatment of "Protocol". (Comments removed for brevity.)
- if not isinstance(params, tuple):
- params = (params,)
- if not params and _gorg(self) is not Tuple:
- raise TypeError(
- "Parameter list to %s[...] cannot be empty" % self.__qualname__
- )
- msg = "Parameters to generic types must be types."
- params = tuple(_type_check(p, msg) for p in params)
- if self in (Generic, Protocol):
- if not all(isinstance(p, TypeVar) for p in params):
- raise TypeError(
- "Parameters to %r[...] must all be type variables" % self
- )
- if len(set(params)) != len(params):
- raise TypeError(
- "Parameters to %r[...] must all be unique" % self
- )
- tvars = params
- args = params
- elif self in (Tuple, Callable):
- tvars = _type_vars(params)
- args = params
- elif self.__origin__ in (Generic, Protocol):
- raise TypeError(
- "Cannot subscript already-subscripted %s" % repr(self)
- )
- else:
- _check_generic(self, params)
- tvars = _type_vars(params)
- args = params
-
- prepend = (self,) if self.__origin__ is None else ()
- return type(self)(
- self.__name__,
- prepend + self.__bases__,
- _no_slots_copy(self.__dict__),
- tvars=tvars,
- args=args,
- origin=self,
- extra=self.__extra__,
- orig_bases=self.__orig_bases__,
- )
-
- class Protocol(metaclass=_ProtocolMeta):
- """Base class for protocol classes. Protocol classes are defined as::
-
- class Proto(Protocol):
- def meth(self) -> int:
- ...
-
- Such classes are primarily used with static type checkers that recognize
- structural subtyping (static duck-typing), for example::
-
- class C:
- def meth(self) -> int:
- return 0
-
- def func(x: Proto) -> int:
- return x.meth()
-
- func(C()) # Passes static type check
-
- See PEP 544 for details. Protocol classes decorated with
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
- only the presence of given attributes, ignoring their type signatures.
-
- Protocol classes can be generic, they are defined as::
-
- class GenProto({bases}):
- def meth(self) -> T:
- ...
- """
-
- __slots__ = ()
- _is_protocol = True
-
- def __new__(cls, *args, **kwds):
- if _gorg(cls) is Protocol:
- raise TypeError(
- "Type Protocol cannot be instantiated; "
- "it can be used only as a base class"
- )
- if OLD_GENERICS:
- return _generic_new(_next_in_mro(cls), cls, *args, **kwds)
- return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
-
- if Protocol.__doc__ is not None:
- Protocol.__doc__ = Protocol.__doc__.format(
- bases="Protocol, Generic[T]" if OLD_GENERICS else "Protocol[T]"
- )
-
-
-elif PEP_560:
- from typing import _collect_type_vars, _GenericAlias, _type_check # noqa
-
- class _ProtocolMeta(abc.ABCMeta):
- # This metaclass is a bit unfortunate and exists only because of the lack
- # of __instancehook__.
- def __instancecheck__(cls, instance):
- # We need this method for situations where attributes are
- # assigned in __init__.
- if (
- not getattr(cls, "_is_protocol", False)
- or _is_callable_members_only(cls)
- ) and issubclass(type(instance), cls):
- return True
- if cls._is_protocol:
- if all(
- hasattr(instance, attr)
- and (
- not callable(getattr(cls, attr, None))
- or getattr(instance, attr) is not None
- )
- for attr in _get_protocol_attrs(cls)
- ):
- return True
- return super().__instancecheck__(instance)
-
- class Protocol(metaclass=_ProtocolMeta):
- # There is quite a lot of overlapping code with typing.Generic.
- # Unfortunately it is hard to avoid this while these live in two different
- # modules. The duplicated code will be removed when Protocol is moved to typing.
- """Base class for protocol classes. Protocol classes are defined as::
-
- class Proto(Protocol):
- def meth(self) -> int:
- ...
-
- Such classes are primarily used with static type checkers that recognize
- structural subtyping (static duck-typing), for example::
-
- class C:
- def meth(self) -> int:
- return 0
-
- def func(x: Proto) -> int:
- return x.meth()
-
- func(C()) # Passes static type check
-
- See PEP 544 for details. Protocol classes decorated with
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
- only the presence of given attributes, ignoring their type signatures.
-
- Protocol classes can be generic, they are defined as::
-
- class GenProto(Protocol[T]):
- def meth(self) -> T:
- ...
- """
- __slots__ = ()
- _is_protocol = True
-
- def __new__(cls, *args, **kwds):
- if cls is Protocol:
- raise TypeError(
- "Type Protocol cannot be instantiated; "
- "it can only be used as a base class"
- )
- return super().__new__(cls)
-
- @_tp_cache
- def __class_getitem__(cls, params):
- if not isinstance(params, tuple):
- params = (params,)
- if not params and cls is not Tuple:
- raise TypeError(
- f"Parameter list to {cls.__qualname__}[...] cannot be empty"
- )
- msg = "Parameters to generic types must be types."
- params = tuple(_type_check(p, msg) for p in params)
- if cls is Protocol:
- # Generic can only be subscripted with unique type variables.
- if not all(isinstance(p, TypeVar) for p in params):
- i = 0
- while isinstance(params[i], TypeVar):
- i += 1
- raise TypeError(
- "Parameters to Protocol[...] must all be type variables. "
- "Parameter {} is {}".format(i + 1, params[i])
- )
- if len(set(params)) != len(params):
- raise TypeError("Parameters to Protocol[...] must all be unique")
- else:
- # Subscripting a regular Generic subclass.
- _check_generic(cls, params)
- return _GenericAlias(cls, params)
-
- def __init_subclass__(cls, *args, **kwargs):
- tvars = []
- if "__orig_bases__" in cls.__dict__:
- error = Generic in cls.__orig_bases__
- else:
- error = Generic in cls.__bases__
- if error:
- raise TypeError("Cannot inherit from plain Generic")
- if "__orig_bases__" in cls.__dict__:
- tvars = _collect_type_vars(cls.__orig_bases__)
- # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
- # If found, tvars must be a subset of it.
- # If not found, tvars is it.
- # Also check for and reject plain Generic,
- # and reject multiple Generic[...] and/or Protocol[...].
- gvars = None
- for base in cls.__orig_bases__:
- if isinstance(base, _GenericAlias) and base.__origin__ in (
- Generic,
- Protocol,
- ):
- # for error messages
- the_base = (
- "Generic" if base.__origin__ is Generic else "Protocol"
- )
- if gvars is not None:
- raise TypeError(
- "Cannot inherit from Generic[...] "
- "and/or Protocol[...] multiple types."
- )
- gvars = base.__parameters__
- if gvars is None:
- gvars = tvars
- else:
- tvarset = set(tvars)
- gvarset = set(gvars)
- if not tvarset <= gvarset:
- s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
- s_args = ", ".join(str(g) for g in gvars)
- raise TypeError(
- "Some type variables ({}) are "
- "not listed in {}[{}]".format(s_vars, the_base, s_args)
- )
- tvars = gvars
- cls.__parameters__ = tuple(tvars)
-
- # Determine if this is a protocol or a concrete subclass.
- if not cls.__dict__.get("_is_protocol", None):
- cls._is_protocol = any(b is Protocol for b in cls.__bases__)
-
- # Set (or override) the protocol subclass hook.
- def _proto_hook(other):
- if not cls.__dict__.get("_is_protocol", None):
- return NotImplemented
- if not getattr(cls, "_is_runtime_protocol", False):
- if sys._getframe(2).f_globals["__name__"] in ["abc", "functools"]:
- return NotImplemented
- raise TypeError(
- "Instance and class checks can only be used with "
- "@runtime protocols"
- )
- if not _is_callable_members_only(cls):
- if sys._getframe(2).f_globals["__name__"] in ["abc", "functools"]:
- return NotImplemented
- raise TypeError(
- "Protocols with non-method members "
- "don't support issubclass()"
- )
- if not isinstance(other, type):
- # Same error as for issubclass(1, int)
- raise TypeError("issubclass() arg 1 must be a class")
- for attr in _get_protocol_attrs(cls):
- for base in other.__mro__:
- if attr in base.__dict__:
- if base.__dict__[attr] is None:
- return NotImplemented
- break
- annotations = getattr(base, "__annotations__", {})
- if (
- isinstance(annotations, typing.Mapping)
- and attr in annotations
- and isinstance(other, _ProtocolMeta)
- and other._is_protocol
- ):
- break
- else:
- return NotImplemented
- return True
-
- if "__subclasshook__" not in cls.__dict__:
- cls.__subclasshook__ = _proto_hook
-
- # We have nothing more to do for non-protocols.
- if not cls._is_protocol:
- return
-
- # Check consistency of bases.
- for base in cls.__bases__:
- if not (
- base in (object, Generic)
- or base.__module__ == "collections.abc"
- and base.__name__ in _PROTO_WHITELIST
- or isinstance(base, _ProtocolMeta)
- and base._is_protocol
- ):
- raise TypeError(
- "Protocols can only inherit from other "
- "protocols, got %r" % base
- )
-
- def _no_init(self, *args, **kwargs):
- if type(self)._is_protocol:
- raise TypeError("Protocols cannot be instantiated")
-
- cls.__init__ = _no_init
-
-
-if hasattr(typing, "runtime_checkable"):
- runtime_checkable = typing.runtime_checkable
-elif HAVE_PROTOCOLS:
-
- def runtime_checkable(cls):
- """Mark a protocol class as a runtime protocol, so that it
- can be used with isinstance() and issubclass(). Raise TypeError
- if applied to a non-protocol class.
-
- This allows a simple-minded structural check very similar to the
- one-offs in collections.abc such as Hashable.
- """
- if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
- raise TypeError(
- "@runtime_checkable can be only applied to protocol classes, "
- "got %r" % cls
- )
- cls._is_runtime_protocol = True
- return cls
-
-
-if HAVE_PROTOCOLS:
- # Exists for backwards compatibility.
- runtime = runtime_checkable
-
-
-if hasattr(typing, "SupportsIndex"):
- SupportsIndex = typing.SupportsIndex
-elif HAVE_PROTOCOLS:
-
- @runtime_checkable
- class SupportsIndex(Protocol):
- __slots__ = ()
-
- @abc.abstractmethod
- def __index__(self) -> int:
- pass
-
-
-if sys.version_info[:2] >= (3, 9):
- # The standard library TypedDict in Python 3.8 does not store runtime information
- # about which (if any) keys are optional. See https://bugs.python.org/issue38834
- TypedDict = typing.TypedDict
-else:
-
- def _check_fails(cls, other):
- try:
- if sys._getframe(1).f_globals["__name__"] not in [
- "abc",
- "functools",
- "typing",
- ]:
- # Typed dicts are only for static structural subtyping.
- raise TypeError("TypedDict does not support instance and class checks")
- except (AttributeError, ValueError):
- pass
- return False
-
- def _dict_new(*args, **kwargs):
- if not args:
- raise TypeError("TypedDict.__new__(): not enough arguments")
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
- return dict(*args, **kwargs)
-
- _dict_new.__text_signature__ = "($cls, _typename, _fields=None, /, **kwargs)"
-
- def _typeddict_new(*args, total=True, **kwargs):
- if not args:
- raise TypeError("TypedDict.__new__(): not enough arguments")
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
- if args:
- typename, args = (
- args[0],
- args[1:],
- ) # allow the "_typename" keyword be passed
- elif "_typename" in kwargs:
- typename = kwargs.pop("_typename")
- import warnings
-
- warnings.warn(
- "Passing '_typename' as keyword argument is deprecated",
- DeprecationWarning,
- stacklevel=2,
- )
- else:
- raise TypeError(
- "TypedDict.__new__() missing 1 required positional "
- "argument: '_typename'"
- )
- if args:
- try:
- (fields,) = args # allow the "_fields" keyword be passed
- except ValueError:
- raise TypeError(
- "TypedDict.__new__() takes from 2 to 3 "
- "positional arguments but {} "
- "were given".format(len(args) + 2)
- )
- elif "_fields" in kwargs and len(kwargs) == 1:
- fields = kwargs.pop("_fields")
- import warnings
-
- warnings.warn(
- "Passing '_fields' as keyword argument is deprecated",
- DeprecationWarning,
- stacklevel=2,
- )
- else:
- fields = None
-
- if fields is None:
- fields = kwargs
- elif kwargs:
- raise TypeError(
- "TypedDict takes either a dict or keyword arguments, but not both"
- )
-
- ns = {"__annotations__": dict(fields), "__total__": total}
- try:
- # Setting correct module is necessary to make typed dict classes pickleable.
- ns["__module__"] = sys._getframe(1).f_globals.get("__name__", "__main__")
- except (AttributeError, ValueError):
- pass
-
- return _TypedDictMeta(typename, (), ns)
-
- _typeddict_new.__text_signature__ = (
- "($cls, _typename, _fields=None, /, *, total=True, **kwargs)"
- )
-
- class _TypedDictMeta(type):
- def __new__(cls, name, bases, ns, total=True):
- # Create new typed dict class object.
- # This method is called directly when TypedDict is subclassed,
- # or via _typeddict_new when TypedDict is instantiated. This way
- # TypedDict supports all three syntaxes described in its docstring.
- # Subclasses and instances of TypedDict return actual dictionaries
- # via _dict_new.
- ns["__new__"] = _typeddict_new if name == "TypedDict" else _dict_new
- tp_dict = super().__new__(cls, name, (dict,), ns)
-
- annotations = {}
- own_annotations = ns.get("__annotations__", {})
- own_annotation_keys = set(own_annotations.keys())
- msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
- own_annotations = {
- n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
- }
- required_keys = set()
- optional_keys = set()
-
- for base in bases:
- annotations.update(base.__dict__.get("__annotations__", {}))
- required_keys.update(base.__dict__.get("__required_keys__", ()))
- optional_keys.update(base.__dict__.get("__optional_keys__", ()))
-
- annotations.update(own_annotations)
- if total:
- required_keys.update(own_annotation_keys)
- else:
- optional_keys.update(own_annotation_keys)
-
- tp_dict.__annotations__ = annotations
- tp_dict.__required_keys__ = frozenset(required_keys)
- tp_dict.__optional_keys__ = frozenset(optional_keys)
- if not hasattr(tp_dict, "__total__"):
- tp_dict.__total__ = total
- return tp_dict
-
- __instancecheck__ = __subclasscheck__ = _check_fails
-
- TypedDict = _TypedDictMeta("TypedDict", (dict,), {})
- TypedDict.__module__ = __name__
- TypedDict.__doc__ = """A simple typed name space. At runtime it is equivalent to a plain dict.
-
- TypedDict creates a dictionary type that expects all of its
- instances to have a certain set of keys, with each key
- associated with a value of a consistent type. This expectation
- is not checked at runtime but is only enforced by type checkers.
- Usage::
-
- class Point2D(TypedDict):
- x: int
- y: int
- label: str
-
- a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
- b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
-
- assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
-
- The type info can be accessed via the Point2D.__annotations__ dict, and
- the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
- TypedDict supports two additional equivalent forms::
-
- Point2D = TypedDict('Point2D', x=int, y=int, label=str)
- Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
-
- The class syntax is only supported in Python 3.6+, while two other
- syntax forms work for Python 2.7 and 3.2+
- """
-
-
-# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
-if hasattr(typing, "Annotated"):
- Annotated = typing.Annotated
- get_type_hints = typing.get_type_hints
- # Not exported and not a public API, but needed for get_origin() and get_args()
- # to work.
- _AnnotatedAlias = typing._AnnotatedAlias
-elif PEP_560:
-
- class _AnnotatedAlias(typing._GenericAlias, _root=True):
- """Runtime representation of an annotated type.
-
- At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
- with extra annotations. The alias behaves like a normal typing alias,
- instantiating is the same as instantiating the underlying type, binding
- it to types is also the same.
- """
-
- def __init__(self, origin, metadata):
- if isinstance(origin, _AnnotatedAlias):
- metadata = origin.__metadata__ + metadata
- origin = origin.__origin__
- super().__init__(origin, origin)
- self.__metadata__ = metadata
-
- def copy_with(self, params):
- assert len(params) == 1
- new_type = params[0]
- return _AnnotatedAlias(new_type, self.__metadata__)
-
- def __repr__(self):
- return "typing_extensions.Annotated[{}, {}]".format(
- typing._type_repr(self.__origin__),
- ", ".join(repr(a) for a in self.__metadata__),
- )
-
- def __reduce__(self):
- return operator.getitem, (Annotated, (self.__origin__,) + self.__metadata__)
-
- def __eq__(self, other):
- if not isinstance(other, _AnnotatedAlias):
- return NotImplemented
- if self.__origin__ != other.__origin__:
- return False
- return self.__metadata__ == other.__metadata__
-
- def __hash__(self):
- return hash((self.__origin__, self.__metadata__))
-
- class Annotated:
- """Add context specific metadata to a type.
-
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
- hypothetical runtime_check module that this type is an unsigned int.
- Every other consumer of this type can ignore this metadata and treat
- this type as int.
-
- The first argument to Annotated must be a valid type (and will be in
- the __origin__ field), the remaining arguments are kept as a tuple in
- the __extra__ field.
-
- Details:
-
- - It's an error to call `Annotated` with less than two arguments.
- - Nested Annotated are flattened::
-
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
- - Instantiating an annotated type is equivalent to instantiating the
- underlying type::
-
- Annotated[C, Ann1](5) == C(5)
-
- - Annotated can be used as a generic type alias::
-
- Optimized = Annotated[T, runtime.Optimize()]
- Optimized[int] == Annotated[int, runtime.Optimize()]
-
- OptimizedList = Annotated[List[T], runtime.Optimize()]
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
- """
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwargs):
- raise TypeError("Type Annotated cannot be instantiated.")
-
- @_tp_cache
- def __class_getitem__(cls, params):
- if not isinstance(params, tuple) or len(params) < 2:
- raise TypeError(
- "Annotated[...] should be used "
- "with at least two arguments (a type and an "
- "annotation)."
- )
- msg = "Annotated[t, ...]: t must be a type."
- origin = typing._type_check(params[0], msg)
- metadata = tuple(params[1:])
- return _AnnotatedAlias(origin, metadata)
-
- def __init_subclass__(cls, *args, **kwargs):
- raise TypeError(f"Cannot subclass {cls.__module__}.Annotated")
-
- def _strip_annotations(t):
- """Strips the annotations from a given type."""
- if isinstance(t, _AnnotatedAlias):
- return _strip_annotations(t.__origin__)
- if isinstance(t, typing._GenericAlias):
- stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
- if stripped_args == t.__args__:
- return t
- res = t.copy_with(stripped_args)
- res._special = t._special
- return res
- return t
-
- def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
- """Return type hints for an object.
-
- This is often the same as obj.__annotations__, but it handles
- forward references encoded as string literals, adds Optional[t] if a
- default value equal to None is set and recursively replaces all
- 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
-
- The argument may be a module, class, method, or function. The annotations
- are returned as a dictionary. For classes, annotations include also
- inherited members.
-
- TypeError is raised if the argument is not of a type that can contain
- annotations, and an empty dictionary is returned if no annotations are
- present.
-
- BEWARE -- the behavior of globalns and localns is counterintuitive
- (unless you are familiar with how eval and exec work). The
- search order is locals first, then globals.
-
- - If no dict arguments are passed, an attempt is made to use the
- globals from obj (or the respective module's globals for classes),
- and these are also used as the locals. If the object does not appear
- to have globals, an empty dictionary is used.
-
- - If one dict argument is passed, it is used for both globals and
- locals.
-
- - If two dict arguments are passed, they specify globals and
- locals, respectively.
- """
- hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
- if include_extras:
- return hint
- return {k: _strip_annotations(t) for k, t in hint.items()}
-
-
-elif HAVE_ANNOTATED:
-
- def _is_dunder(name):
- """Returns True if name is a __dunder_variable_name__."""
- return len(name) > 4 and name.startswith("__") and name.endswith("__")
-
- # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
- # checks, argument expansion etc. are done on the _subs_tre. As a result we
- # can't provide a get_type_hints function that strips out annotations.
-
- class AnnotatedMeta(typing.GenericMeta):
- """Metaclass for Annotated"""
-
- def __new__(cls, name, bases, namespace, **kwargs):
- if any(b is not object for b in bases):
- raise TypeError("Cannot subclass " + str(Annotated))
- return super().__new__(cls, name, bases, namespace, **kwargs)
-
- @property
- def __metadata__(self):
- return self._subs_tree()[2]
-
- def _tree_repr(self, tree):
- cls, origin, metadata = tree
- if not isinstance(origin, tuple):
- tp_repr = typing._type_repr(origin)
- else:
- tp_repr = origin[0]._tree_repr(origin)
- metadata_reprs = ", ".join(repr(arg) for arg in metadata)
- return f"{cls}[{tp_repr}, {metadata_reprs}]"
-
- def _subs_tree(self, tvars=None, args=None): # noqa
- if self is Annotated:
- return Annotated
- res = super()._subs_tree(tvars=tvars, args=args)
- # Flatten nested Annotated
- if isinstance(res[1], tuple) and res[1][0] is Annotated:
- sub_tp = res[1][1]
- sub_annot = res[1][2]
- return (Annotated, sub_tp, sub_annot + res[2])
- return res
-
- def _get_cons(self):
- """Return the class used to create instance of this type."""
- if self.__origin__ is None:
- raise TypeError(
- "Cannot get the underlying type of a "
- "non-specialized Annotated type."
- )
- tree = self._subs_tree()
- while isinstance(tree, tuple) and tree[0] is Annotated:
- tree = tree[1]
- if isinstance(tree, tuple):
- return tree[0]
- else:
- return tree
-
- @_tp_cache
- def __getitem__(self, params):
- if not isinstance(params, tuple):
- params = (params,)
- if self.__origin__ is not None: # specializing an instantiated type
- return super().__getitem__(params)
- elif not isinstance(params, tuple) or len(params) < 2:
- raise TypeError(
- "Annotated[...] should be instantiated "
- "with at least two arguments (a type and an "
- "annotation)."
- )
- else:
- msg = "Annotated[t, ...]: t must be a type."
- tp = typing._type_check(params[0], msg)
- metadata = tuple(params[1:])
- return type(self)(
- self.__name__,
- self.__bases__,
- _no_slots_copy(self.__dict__),
- tvars=_type_vars((tp,)),
- # Metadata is a tuple so it won't be touched by _replace_args et al.
- args=(tp, metadata),
- origin=self,
- )
-
- def __call__(self, *args, **kwargs):
- cons = self._get_cons()
- result = cons(*args, **kwargs)
- try:
- result.__orig_class__ = self
- except AttributeError:
- pass
- return result
-
- def __getattr__(self, attr):
- # For simplicity we just don't relay all dunder names
- if self.__origin__ is not None and not _is_dunder(attr):
- return getattr(self._get_cons(), attr)
- raise AttributeError(attr)
-
- def __setattr__(self, attr, value):
- if _is_dunder(attr) or attr.startswith("_abc_"):
- super().__setattr__(attr, value)
- elif self.__origin__ is None:
- raise AttributeError(attr)
- else:
- setattr(self._get_cons(), attr, value)
-
- def __instancecheck__(self, obj):
- raise TypeError("Annotated cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Annotated cannot be used with issubclass().")
-
- class Annotated(metaclass=AnnotatedMeta):
- """Add context specific metadata to a type.
-
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
- hypothetical runtime_check module that this type is an unsigned int.
- Every other consumer of this type can ignore this metadata and treat
- this type as int.
-
- The first argument to Annotated must be a valid type, the remaining
- arguments are kept as a tuple in the __metadata__ field.
-
- Details:
-
- - It's an error to call `Annotated` with less than two arguments.
- - Nested Annotated are flattened::
-
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
- - Instantiating an annotated type is equivalent to instantiating the
- underlying type::
-
- Annotated[C, Ann1](5) == C(5)
-
- - Annotated can be used as a generic type alias::
-
- Optimized = Annotated[T, runtime.Optimize()]
- Optimized[int] == Annotated[int, runtime.Optimize()]
-
- OptimizedList = Annotated[List[T], runtime.Optimize()]
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
- """
-
-
-# Python 3.8 has get_origin() and get_args() but those implementations aren't
-# Annotated-aware, so we can't use those, only Python 3.9 versions will do.
-if sys.version_info[:2] >= (3, 9):
- get_origin = typing.get_origin
- get_args = typing.get_args
-elif PEP_560:
- from typing import _GenericAlias # noqa
-
- def get_origin(tp):
- """Get the unsubscripted version of a type.
-
- This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
- and Annotated. Return None for unsupported types. Examples::
-
- get_origin(Literal[42]) is Literal
- get_origin(int) is None
- get_origin(ClassVar[int]) is ClassVar
- get_origin(Generic) is Generic
- get_origin(Generic[T]) is Generic
- get_origin(Union[T, int]) is Union
- get_origin(List[Tuple[T, T]][int]) == list
- """
- if isinstance(tp, _AnnotatedAlias):
- return Annotated
- if isinstance(tp, _GenericAlias):
- return tp.__origin__
- if tp is Generic:
- return Generic
- return None
-
- def get_args(tp):
- """Get type arguments with all substitutions performed.
-
- For unions, basic simplifications used by Union constructor are performed.
- Examples::
- get_args(Dict[str, int]) == (str, int)
- get_args(int) == ()
- get_args(Union[int, Union[T, int], str][int]) == (int, str)
- get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
- get_args(Callable[[], T][int]) == ([], int)
- """
- if isinstance(tp, _AnnotatedAlias):
- return (tp.__origin__,) + tp.__metadata__
- if isinstance(tp, _GenericAlias):
- res = tp.__args__
- if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
- res = (list(res[:-1]), res[-1])
- return res
- return ()
-
-
-if hasattr(typing, "TypeAlias"):
- TypeAlias = typing.TypeAlias
-elif sys.version_info[:2] >= (3, 9):
-
- class _TypeAliasForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return "typing_extensions." + self._name
-
- @_TypeAliasForm
- def TypeAlias(self, parameters):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
- raise TypeError(f"{self} is not subscriptable")
-
-
-elif sys.version_info[:2] >= (3, 7):
-
- class _TypeAliasForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return "typing_extensions." + self._name
-
- TypeAlias = _TypeAliasForm(
- "TypeAlias",
- doc="""Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example
- above.""",
- )
-
-elif hasattr(typing, "_FinalTypingBase"):
-
- class _TypeAliasMeta(typing.TypingMeta):
- """Metaclass for TypeAlias"""
-
- def __repr__(self):
- return "typing_extensions.TypeAlias"
-
- class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
-
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("TypeAlias cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("TypeAlias cannot be used with issubclass().")
-
- def __repr__(self):
- return "typing_extensions.TypeAlias"
-
- TypeAlias = _TypeAliasBase(_root=True)
-else:
-
- class _TypeAliasMeta(typing.TypingMeta):
- """Metaclass for TypeAlias"""
-
- def __instancecheck__(self, obj):
- raise TypeError("TypeAlias cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("TypeAlias cannot be used with issubclass().")
-
- def __call__(self, *args, **kwargs):
- raise TypeError("Cannot instantiate TypeAlias")
-
- class TypeAlias(metaclass=_TypeAliasMeta, _root=True):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
-
- __slots__ = ()
diff --git a/setup.cfg b/setup.cfg
index d8525c12cf13e..b1f423c12ebf4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -70,7 +70,6 @@ omit =
*/tests/*
pandas/_typing.py
pandas/_version.py
- pandas/_vendored/typing_extensions.py
plugins = Cython.Coverage
[coverage:report]
@@ -102,7 +101,7 @@ directory = coverage_html_report
# To be kept consistent with "Import Formatting" section in contributing.rst
[isort]
-known_pre_libs = pandas._config,pandas._vendored
+known_pre_libs = pandas._config
known_pre_core = pandas._libs,pandas._typing,pandas.util._*,pandas.compat,pandas.errors
known_dtypes = pandas.core.dtypes
known_post_core = pandas.tseries,pandas.io,pandas.plotting
@@ -112,7 +111,7 @@ combine_as_imports = True
line_length = 88
force_sort_within_sections = True
skip_glob = env,
-skip = pandas/__init__.py,pandas/_vendored/typing_extensions.py
+skip = pandas/__init__.py
[mypy]
ignore_missing_imports=True
@@ -123,10 +122,6 @@ warn_redundant_casts = True
warn_unused_ignores = True
show_error_codes = True
-[mypy-pandas._vendored.*]
-check_untyped_defs=False
-ignore_errors=True
-
[mypy-pandas.tests.*]
check_untyped_defs=False
| - [x] closes #37119
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As discussed in #37119, the vendored version does not serve its intended purpose. Moreover, #37137 shows a way to get by without it. | https://api.github.com/repos/pandas-dev/pandas/pulls/37511 | 2020-10-30T02:38:57Z | 2020-10-30T13:20:34Z | 2020-10-30T13:20:34Z | 2020-10-30T14:56:51Z |
CLN: assorted cleanups | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 948ffdc1f7c01..726ca0ce4d776 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -233,6 +233,9 @@ def fillna(self: _T, value=None, method=None, limit=None) -> _T:
new_values = self.copy()
return new_values
+ # ------------------------------------------------------------------------
+ # Reductions
+
def _reduce(self, name: str, skipna: bool = True, **kwargs):
meth = getattr(self, name, None)
if meth:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index ef8093660b413..b95a7acc19b1f 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -207,8 +207,6 @@ def _from_sequence(
return scalars
periods = np.asarray(scalars, dtype=object)
- if copy:
- periods = periods.copy()
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 17fcd00b7b251..c948291f29aeb 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -120,7 +120,7 @@ class TimedeltaArray(dtl.TimelikeOps):
"ceil",
]
- # Note: ndim must be defined to ensure NaT.__richcmp(TimedeltaArray)
+ # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
# operates pointwise.
def _box_func(self, x) -> Union[Timedelta, NaTType]:
@@ -520,7 +520,7 @@ def __mul__(self, other) -> "TimedeltaArray":
def __truediv__(self, other):
# timedelta / X is well-defined for timedelta-like or numeric X
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
if other is NaT:
# specifically timedelta64-NaT
@@ -577,7 +577,7 @@ def __truediv__(self, other):
@unpack_zerodim_and_defer("__rtruediv__")
def __rtruediv__(self, other):
# X / timedelta is defined only for timedelta-like X
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
if other is NaT:
# specifically timedelta64-NaT
@@ -620,7 +620,7 @@ def __rtruediv__(self, other):
def __floordiv__(self, other):
if is_scalar(other):
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
if other is NaT:
# treat this specifically as timedelta-NaT
@@ -684,7 +684,7 @@ def __floordiv__(self, other):
def __rfloordiv__(self, other):
if is_scalar(other):
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
if other is NaT:
# treat this specifically as timedelta-NaT
@@ -730,21 +730,21 @@ def __rfloordiv__(self, other):
@unpack_zerodim_and_defer("__mod__")
def __mod__(self, other):
# Note: This is a naive implementation, can likely be optimized
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
return self - (self // other) * other
@unpack_zerodim_and_defer("__rmod__")
def __rmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
return other - (other // self) * self
@unpack_zerodim_and_defer("__divmod__")
def __divmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
res1 = self // other
@@ -754,7 +754,7 @@ def __divmod__(self, other):
@unpack_zerodim_and_defer("__rdivmod__")
def __rdivmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
- if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
res1 = other // self
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 6b64fd6a20e9a..fb0e710921a5f 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -578,7 +578,7 @@ def _convert_list_indexer(self, keyarr):
return self.get_indexer(keyarr)
@doc(Index._maybe_cast_slice_bound)
- def _maybe_cast_slice_bound(self, label, side, kind):
+ def _maybe_cast_slice_bound(self, label, side: str, kind):
if kind == "loc":
return label
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index d8fab1283dfdc..7f0dc2426eba7 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -831,7 +831,7 @@ def _should_fallback_to_positional(self) -> bool:
# positional in this case
return self.dtype.subtype.kind in ["m", "M"]
- def _maybe_cast_slice_bound(self, label, side, kind):
+ def _maybe_cast_slice_bound(self, label, side: str, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(Index._convert_list_indexer.__doc__)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index facaae3a65f16..546b90249b5ca 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -96,7 +96,7 @@ def _validate_dtype(cls, dtype: Dtype) -> None:
# Indexing Methods
@doc(Index._maybe_cast_slice_bound)
- def _maybe_cast_slice_bound(self, label, side, kind):
+ def _maybe_cast_slice_bound(self, label, side: str, kind):
assert kind in ["loc", "getitem", None]
# we will try to coerce to integers
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 302fead8c8b0c..66fd6943de721 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -8,8 +8,6 @@
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
- is_float,
- is_integer,
is_scalar,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
@@ -246,7 +244,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
return lbound
else:
return lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
- elif is_integer(label) or is_float(label):
+ elif not isinstance(label, self._data._recognized_scalars):
self._invalid_indexer("slice", label)
return label
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 1132234ae7f8d..2e32a7572adc7 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -29,6 +29,7 @@
from pandas.core.construction import extract_array
if TYPE_CHECKING:
+ from pandas import MultiIndex
from pandas.core.indexes.base import Index
_INT64_MAX = np.iinfo(np.int64).max
@@ -415,7 +416,9 @@ def nargminmax(values, method: str):
return non_nan_idx[func(non_nans)]
-def ensure_key_mapped_multiindex(index, key: Callable, level=None):
+def _ensure_key_mapped_multiindex(
+ index: "MultiIndex", key: Callable, level=None
+) -> "MultiIndex":
"""
Returns a new MultiIndex in which key has been applied
to all levels specified in level (or all levels if level
@@ -441,7 +444,6 @@ def ensure_key_mapped_multiindex(index, key: Callable, level=None):
labels : MultiIndex
Resulting MultiIndex with modified levels.
"""
- from pandas.core.indexes.api import MultiIndex
if level is not None:
if isinstance(level, (str, int)):
@@ -460,7 +462,7 @@ def ensure_key_mapped_multiindex(index, key: Callable, level=None):
for level in range(index.nlevels)
]
- labels = MultiIndex.from_arrays(mapped)
+ labels = type(index).from_arrays(mapped)
return labels
@@ -484,7 +486,7 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None):
return values
if isinstance(values, ABCMultiIndex):
- return ensure_key_mapped_multiindex(values, key, level=levels)
+ return _ensure_key_mapped_multiindex(values, key, level=levels)
result = key(values.copy())
if len(result) != len(values):
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index f5078930a7b4b..a7c22b4983ed7 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -981,25 +981,6 @@ def test_apply_function_index_return(function):
tm.assert_series_equal(result, expected)
-def test_apply_function_with_indexing():
- # GH: 33058
- df = DataFrame({"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]})
-
- def fn(x):
- x.col2[x.index[-1]] = 0
- return x.col2
-
- result = df.groupby(["col1"], as_index=False).apply(fn)
- expected = Series(
- [1, 2, 0, 4, 5, 0],
- index=pd.MultiIndex.from_tuples(
- [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)]
- ),
- name="col2",
- )
- tm.assert_series_equal(result, expected)
-
-
def test_apply_function_with_indexing_return_column():
# GH: 7002
df = DataFrame(
| https://api.github.com/repos/pandas-dev/pandas/pulls/37509 | 2020-10-30T02:25:01Z | 2020-10-30T16:04:08Z | 2020-10-30T16:04:08Z | 2020-10-30T17:01:42Z | |
REGR: inplace Series op not actually operating inplace | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index dc0b5d3976489..0951cfdcc5ec7 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -29,6 +29,7 @@ Fixed regressions
- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`)
- Fixed regression in setitem with :meth:`DataFrame.iloc` which raised error when trying to set a value while filtering with a boolean list (:issue:`36741`)
- Fixed regression in :attr:`MultiIndex.is_monotonic_increasing` returning wrong results with ``NaN`` in at least one of the levels (:issue:`37220`)
+- Fixed regression in inplace arithmetic operation on a Series not updating the parent DataFrame (:issue:`36373`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fb5d1ec8fd0db..c4ca34c3b74a6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11115,6 +11115,11 @@ def _inplace_method(self, other, op):
"""
result = op(self, other)
+ if self.ndim == 1 and result._indexed_same(self) and result.dtype == self.dtype:
+ # GH#36498 this inplace op can _actually_ be inplace.
+ self._values[:] = result._values
+ return self
+
# Delete cacher
self._reset_cacher()
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 0ee74beea4858..350b8d01d849a 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1698,3 +1698,16 @@ def test_arith_list_of_arraylike_raise(to_add):
df + to_add
with pytest.raises(ValueError, match=msg):
to_add + df
+
+
+def test_inplace_arithmetic_series_update():
+ # https://github.com/pandas-dev/pandas/issues/36373
+ df = DataFrame({"A": [1, 2, 3]})
+ series = df["A"]
+ vals = series._values
+
+ series += 1
+ assert series._values is vals
+
+ expected = DataFrame({"A": [2, 3, 4]})
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 8fb418ab78307..5699b7ba5e36c 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -888,6 +888,7 @@ def test_identity_slice_returns_new_object(self):
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
+ @pytest.mark.xfail(reason="accidental fix reverted - GH37497")
def test_loc_copy_vs_view(self):
# GH 15631
x = DataFrame(zip(range(3), range(3)), columns=["a", "b"])
| - [x] closes #36373
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
alternative to #37497, ensures that Series inplace ops are actually inplace whenever possible. The whatsnew is copied verbatim from #37497, the new test is copied with an additional assertion about `series._values` | https://api.github.com/repos/pandas-dev/pandas/pulls/37508 | 2020-10-29T23:52:07Z | 2020-10-30T08:59:45Z | 2020-10-30T08:59:45Z | 2020-10-30T15:00:52Z |
TST/REF: collect indexing tests by method | diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 3be45e2d48e19..6aebc23d1c016 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -8,6 +8,8 @@
import pandas as pd
from pandas import (
+ Categorical,
+ CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
@@ -542,6 +544,33 @@ def test_reset_index_nat_multiindex(self, ix_data, exp_data):
expected = DataFrame(exp_data)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize(
+ "codes", ([[0, 0, 1, 1], [0, 1, 0, 1]], [[0, 0, -1, 1], [0, 1, 0, 1]])
+ )
+ def test_rest_index_multiindex_categorical_with_missing_values(self, codes):
+ # GH#24206
+
+ index = MultiIndex(
+ [CategoricalIndex(["A", "B"]), CategoricalIndex(["a", "b"])], codes
+ )
+ data = {"col": range(len(index))}
+ df = DataFrame(data=data, index=index)
+
+ expected = DataFrame(
+ {
+ "level_0": Categorical.from_codes(codes[0], categories=["A", "B"]),
+ "level_1": Categorical.from_codes(codes[1], categories=["a", "b"]),
+ "col": range(4),
+ }
+ )
+
+ res = df.reset_index()
+ tm.assert_frame_equal(res, expected)
+
+ # roundtrip
+ res = expected.set_index(["level_0", "level_1"]).reset_index()
+ tm.assert_frame_equal(res, expected)
+
@pytest.mark.parametrize(
"array, dtype",
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index e4a0d37e3a017..3cd35e900ee06 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -10,13 +10,10 @@
)
from pandas import (
- Categorical,
- CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
- MultiIndex,
Series,
Timestamp,
cut,
@@ -192,32 +189,3 @@ def test_set_reset_index(self):
df = df.set_index("B")
df = df.reset_index()
-
-
-class TestCategoricalIndex:
- @pytest.mark.parametrize(
- "codes", ([[0, 0, 1, 1], [0, 1, 0, 1]], [[0, 0, -1, 1], [0, 1, 0, 1]])
- )
- def test_reindexing_with_missing_values(self, codes):
- # GH 24206
-
- index = MultiIndex(
- [CategoricalIndex(["A", "B"]), CategoricalIndex(["a", "b"])], codes
- )
- data = {"col": range(len(index))}
- df = DataFrame(data=data, index=index)
-
- expected = DataFrame(
- {
- "level_0": Categorical.from_codes(codes[0], categories=["A", "B"]),
- "level_1": Categorical.from_codes(codes[1], categories=["a", "b"]),
- "col": range(4),
- }
- )
-
- res = df.reset_index()
- tm.assert_frame_equal(res, expected)
-
- # roundtrip
- res = expected.set_index(["level_0", "level_1"]).reset_index()
- tm.assert_frame_equal(res, expected)
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index cc448279bfce0..2a15875229e12 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -9,7 +9,7 @@
from pandas._libs.tslibs import conversion, timezones
import pandas as pd
-from pandas import Index, Series, Timestamp, date_range, period_range
+from pandas import DataFrame, Index, Series, Timestamp, date_range, period_range
import pandas._testing as tm
from pandas.core.indexing import IndexingError
@@ -17,6 +17,19 @@
class TestSeriesGetitemScalars:
+ def test_getitem_out_of_bounds_indexerror(self, datetime_series):
+ # don't segfault, GH#495
+ msg = r"index \d+ is out of bounds for axis 0 with size \d+"
+ with pytest.raises(IndexError, match=msg):
+ datetime_series[len(datetime_series)]
+
+ def test_getitem_out_of_bounds_empty_rangeindex_keyerror(self):
+ # GH#917
+ # With a RangeIndex, an int key gives a KeyError
+ ser = Series([], dtype=object)
+ with pytest.raises(KeyError, match="-1"):
+ ser[-1]
+
def test_getitem_keyerror_with_int64index(self):
ser = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
@@ -292,11 +305,23 @@ def test_getitem_multilevel_scalar_slice_not_implemented(
ser[2000, 3:4]
+def test_getitem_dataframe_raises():
+ rng = list(range(10))
+ ser = Series(10, index=rng)
+ df = DataFrame(rng, index=rng)
+ msg = (
+ "Indexing a Series with DataFrame is not supported, "
+ "use the appropriate DataFrame column"
+ )
+ with pytest.raises(TypeError, match=msg):
+ ser[df > 5]
+
+
def test_getitem_assignment_series_aligment():
# https://github.com/pandas-dev/pandas/issues/37427
# with getitem, when assigning with a Series, it is not first aligned
- s = Series(range(10))
+ ser = Series(range(10))
idx = np.array([2, 4, 9])
- s[idx] = Series([10, 11, 12])
+ ser[idx] = Series([10, 11, 12])
expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12])
- tm.assert_series_equal(s, expected)
+ tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 8c53ed85a20b3..214694443ba2a 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -163,19 +163,6 @@ def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):
assert result[2] == result_1[2]
-def test_getitem_out_of_bounds(datetime_series):
- # don't segfault, GH #495
- msg = r"index \d+ is out of bounds for axis 0 with size \d+"
- with pytest.raises(IndexError, match=msg):
- datetime_series[len(datetime_series)]
-
- # GH #917
- # With a RangeIndex, an int key gives a KeyError
- s = Series([], dtype=object)
- with pytest.raises(KeyError, match="-1"):
- s[-1]
-
-
def test_getitem_setitem_integers():
# caused bug without test
s = Series([1, 2, 3], ["a", "b", "c"])
@@ -260,18 +247,6 @@ def test_setitem_ambiguous_keyerror():
tm.assert_series_equal(s2, expected)
-def test_getitem_dataframe():
- rng = list(range(10))
- s = Series(10, index=rng)
- df = DataFrame(rng, index=rng)
- msg = (
- "Indexing a Series with DataFrame is not supported, "
- "use the appropriate DataFrame column"
- )
- with pytest.raises(TypeError, match=msg):
- s[df > 5]
-
-
def test_setitem(datetime_series, string_series):
datetime_series[datetime_series.index[5]] = np.NaN
datetime_series[[1, 2, 17]] = np.NaN
@@ -296,22 +271,6 @@ def test_setitem(datetime_series, string_series):
tm.assert_series_equal(s, expected)
-def test_setitem_empty_series():
- # Test for issue #10193
- key = pd.Timestamp("2012-01-01")
- series = Series(dtype=object)
- series[key] = 47
- expected = Series(47, [key])
- tm.assert_series_equal(series, expected)
-
- # GH#33573 our index should retain its freq
- series = Series([], pd.DatetimeIndex([], freq="D"), dtype=object)
- series[key] = 47
- expected = Series(47, pd.DatetimeIndex([key], freq="D"))
- tm.assert_series_equal(series, expected)
- assert series.index.freq == expected.index.freq
-
-
def test_setitem_dtypes():
# change dtypes
# GH 4463
@@ -338,32 +297,13 @@ def test_setitem_dtypes():
tm.assert_series_equal(s, Series([np.nan, 1.0]))
-def test_set_value(datetime_series, string_series):
- idx = datetime_series.index[10]
- res = datetime_series._set_value(idx, 0)
- assert res is None
- assert datetime_series[idx] == 0
-
- # equiv
- s = string_series.copy()
- res = s._set_value("foobar", 0)
- assert res is None
- assert s.index[-1] == "foobar"
- assert s["foobar"] == 0
-
- s = string_series.copy()
- s.loc["foobar"] = 0
- assert s.index[-1] == "foobar"
- assert s["foobar"] == 0
-
-
def test_setslice(datetime_series):
sl = datetime_series[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique is True
-def test_2d_to_1d_assignment_raises():
+def test_loc_setitem_2d_to_1d_raises():
x = np.random.randn(2, 2)
y = Series(range(2))
@@ -611,25 +551,6 @@ def test_loc_setitem(string_series):
assert string_series[d2] == 6
-def test_setitem_na():
- # these induce dtype changes
- expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
- s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
- s[::2] = np.nan
- tm.assert_series_equal(s, expected)
-
- # gets coerced to float, right?
- expected = Series([np.nan, 1, np.nan, 0])
- s = Series([True, True, False, False])
- s[::2] = np.nan
- tm.assert_series_equal(s, expected)
-
- expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])
- s = Series(np.arange(10))
- s[:5] = np.nan
- tm.assert_series_equal(s, expected)
-
-
def test_timedelta_assignment():
# GH 8209
s = Series([], dtype=object)
@@ -829,52 +750,11 @@ def test_multilevel_preserve_name():
assert result2.name == s.name
-def test_setitem_scalar_into_readonly_backing_data():
- # GH14359: test that you cannot mutate a read only buffer
-
- array = np.zeros(5)
- array.flags.writeable = False # make the array immutable
- series = Series(array)
-
- for n in range(len(series)):
- msg = "assignment destination is read-only"
- with pytest.raises(ValueError, match=msg):
- series[n] = 1
-
- assert array[n] == 0
-
-
-def test_setitem_slice_into_readonly_backing_data():
- # GH14359: test that you cannot mutate a read only buffer
-
- array = np.zeros(5)
- array.flags.writeable = False # make the array immutable
- series = Series(array)
-
- msg = "assignment destination is read-only"
- with pytest.raises(ValueError, match=msg):
- series[1:3] = 1
-
- assert not array.any()
-
-
"""
miscellaneous methods
"""
-def test_pop():
- # GH 6600
- df = DataFrame({"A": 0, "B": np.arange(5, dtype="int64"), "C": 0})
- k = df.iloc[4]
-
- result = k.pop("B")
- assert result == 4
-
- expected = Series([0, 0], index=["A", "C"], name=4)
- tm.assert_series_equal(k, expected)
-
-
def test_uint_drop(any_int_dtype):
# see GH18311
# assigning series.loc[0] = 4 changed series.dtype to int
diff --git a/pandas/tests/series/indexing/test_pop.py b/pandas/tests/series/indexing/test_pop.py
new file mode 100644
index 0000000000000..7453f98ab3735
--- /dev/null
+++ b/pandas/tests/series/indexing/test_pop.py
@@ -0,0 +1,13 @@
+from pandas import Series
+import pandas._testing as tm
+
+
+def test_pop():
+ # GH#6600
+ ser = Series([0, 4, 0], index=["A", "B", "C"], name=4)
+
+ result = ser.pop("B")
+ assert result == 4
+
+ expected = Series([0, 0], index=["A", "C"], name=4)
+ tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/series/indexing/test_set_value.py b/pandas/tests/series/indexing/test_set_value.py
index ea09646de71c1..61b01720d1e40 100644
--- a/pandas/tests/series/indexing/test_set_value.py
+++ b/pandas/tests/series/indexing/test_set_value.py
@@ -19,3 +19,24 @@ def test_series_set_value():
expected = Series([1.0, np.nan], index=index)
tm.assert_series_equal(s, expected)
+
+
+def test_set_value_dt64(datetime_series):
+ idx = datetime_series.index[10]
+ res = datetime_series._set_value(idx, 0)
+ assert res is None
+ assert datetime_series[idx] == 0
+
+
+def test_set_value_str_index(string_series):
+ # equiv
+ ser = string_series.copy()
+ res = ser._set_value("foobar", 0)
+ assert res is None
+ assert ser.index[-1] == "foobar"
+ assert ser["foobar"] == 0
+
+ ser2 = string_series.copy()
+ ser2.loc["foobar"] = 0
+ assert ser2.index[-1] == "foobar"
+ assert ser2["foobar"] == 0
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 967cf5c55845c..b4c5ac0195d26 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -3,7 +3,15 @@
import numpy as np
import pytest
-from pandas import MultiIndex, NaT, Series, Timestamp, date_range, period_range
+from pandas import (
+ DatetimeIndex,
+ MultiIndex,
+ NaT,
+ Series,
+ Timestamp,
+ date_range,
+ period_range,
+)
from pandas.core.indexing import IndexingError
import pandas.testing as tm
@@ -162,3 +170,71 @@ def test_setitem_callable_other(self):
expected = Series([1, 2, inc, 4])
tm.assert_series_equal(ser, expected)
+
+
+class TestSetitemCasting:
+ def test_setitem_nan_casts(self):
+ # these induce dtype changes
+ expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
+ ser = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
+ ser[::2] = np.nan
+ tm.assert_series_equal(ser, expected)
+
+ # gets coerced to float, right?
+ expected = Series([np.nan, 1, np.nan, 0])
+ ser = Series([True, True, False, False])
+ ser[::2] = np.nan
+ tm.assert_series_equal(ser, expected)
+
+ expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])
+ ser = Series(np.arange(10))
+ ser[:5] = np.nan
+ tm.assert_series_equal(ser, expected)
+
+
+class TestSetitemWithExpansion:
+ def test_setitem_empty_series(self):
+ # GH#10193
+ key = Timestamp("2012-01-01")
+ series = Series(dtype=object)
+ series[key] = 47
+ expected = Series(47, [key])
+ tm.assert_series_equal(series, expected)
+
+ def test_setitem_empty_series_datetimeindex_preserves_freq(self):
+ # GH#33573 our index should retain its freq
+ series = Series([], DatetimeIndex([], freq="D"), dtype=object)
+ key = Timestamp("2012-01-01")
+ series[key] = 47
+ expected = Series(47, DatetimeIndex([key], freq="D"))
+ tm.assert_series_equal(series, expected)
+ assert series.index.freq == expected.index.freq
+
+
+def test_setitem_scalar_into_readonly_backing_data():
+ # GH#14359: test that you cannot mutate a read only buffer
+
+ array = np.zeros(5)
+ array.flags.writeable = False # make the array immutable
+ series = Series(array)
+
+ for n in range(len(series)):
+ msg = "assignment destination is read-only"
+ with pytest.raises(ValueError, match=msg):
+ series[n] = 1
+
+ assert array[n] == 0
+
+
+def test_setitem_slice_into_readonly_backing_data():
+ # GH#14359: test that you cannot mutate a read only buffer
+
+ array = np.zeros(5)
+ array.flags.writeable = False # make the array immutable
+ series = Series(array)
+
+ msg = "assignment destination is read-only"
+ with pytest.raises(ValueError, match=msg):
+ series[1:3] = 1
+
+ assert not array.any()
| https://api.github.com/repos/pandas-dev/pandas/pulls/37507 | 2020-10-29T23:40:42Z | 2020-10-30T17:16:17Z | 2020-10-30T17:16:17Z | 2020-10-30T17:16:48Z | |
TST: pct_change from generic to frame | diff --git a/pandas/tests/frame/methods/test_pct_change.py b/pandas/tests/frame/methods/test_pct_change.py
index 8f3f37fb9fff7..56fb9ab0d8f00 100644
--- a/pandas/tests/frame/methods/test_pct_change.py
+++ b/pandas/tests/frame/methods/test_pct_change.py
@@ -6,6 +6,27 @@
class TestDataFramePctChange:
+ @pytest.mark.parametrize(
+ "periods,fill_method,limit,exp",
+ [
+ (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
+ (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
+ (1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
+ (1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
+ (-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
+ (-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
+ (-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
+ (-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
+ ],
+ )
+ @pytest.mark.parametrize("klass", [DataFrame, Series])
+ def test_pct_change_with_nas(self, periods, fill_method, limit, exp, klass):
+ vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
+ obj = klass(vals)
+
+ res = obj.pct_change(periods=periods, fill_method=fill_method, limit=limit)
+ tm.assert_equal(res, klass(exp))
+
def test_pct_change_numeric(self):
# GH#11150
pnl = DataFrame(
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index cccae1babe82f..45601abc95fe6 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -356,29 +356,6 @@ def test_copy_and_deepcopy(self, shape, func):
assert obj_copy is not obj
self._compare(obj_copy, obj)
- @pytest.mark.parametrize(
- "periods,fill_method,limit,exp",
- [
- (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
- (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
- (1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
- (1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
- (-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
- (-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
- (-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
- (-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
- ],
- )
- def test_pct_change(self, periods, fill_method, limit, exp):
- vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
- obj = self._typ(vals)
- func = getattr(obj, "pct_change")
- res = func(periods=periods, fill_method=fill_method, limit=limit)
- if type(obj) is DataFrame:
- tm.assert_frame_equal(res, DataFrame(exp))
- else:
- tm.assert_series_equal(res, Series(exp))
-
class TestNDFrame:
# tests that don't fit elsewhere
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37504 | 2020-10-29T22:34:09Z | 2020-10-30T13:24:00Z | 2020-10-30T13:24:00Z | 2020-10-30T14:49:07Z |
Backport PR #37473 on branch 1.1.x (CI: 32 bit maybe_indices_to_slice) | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 6bf0aba128e39..7127c57defee3 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -36,6 +36,7 @@ from numpy cimport (
float32_t,
float64_t,
int64_t,
+ intp_t,
ndarray,
uint8_t,
uint64_t,
@@ -490,7 +491,7 @@ def has_infs_f8(const float64_t[:] arr) -> bool:
return False
-def maybe_indices_to_slice(ndarray[int64_t] indices, int max_len):
+def maybe_indices_to_slice(ndarray[intp_t] indices, int max_len):
cdef:
Py_ssize_t i, n = len(indices)
int k, vstart, vlast, v
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5d7f15be6e107..0b2c99ea674c2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2881,7 +2881,9 @@ def __getitem__(self, key):
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
- indexer = lib.maybe_indices_to_slice(indexer, len(self))
+ indexer = lib.maybe_indices_to_slice(
+ indexer.astype(np.intp, copy=False), len(self)
+ )
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a18f7bdccd0d0..e4dee2b0a08ce 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5094,7 +5094,9 @@ def get_slice_bound(self, label, side: str_t, kind) -> int:
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
else:
- slc = lib.maybe_indices_to_slice(slc.astype("i8"), len(self))
+ slc = lib.maybe_indices_to_slice(
+ slc.astype(np.intp, copy=False), len(self)
+ )
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 9b57a25f1b0e9..b30ef37c14b4b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -15,7 +15,6 @@
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
- ensure_int64,
is_bool_dtype,
is_dtype_equal,
is_integer,
@@ -181,7 +180,7 @@ def sort_values(self, return_indexer=False, ascending=True, key=None):
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
- indices = ensure_int64(indices)
+ indices = np.asarray(indices, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
@@ -581,7 +580,9 @@ def delete(self, loc):
freq = self.freq
else:
if is_list_like(loc):
- loc = lib.maybe_indices_to_slice(ensure_int64(np.array(loc)), len(self))
+ loc = lib.maybe_indices_to_slice(
+ np.asarray(loc, dtype=np.intp), len(self)
+ )
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 142e1bbe98d60..b9ba823ca1b0b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2688,7 +2688,7 @@ def get_loc(self, key, method=None):
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
- if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
+ if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
@@ -2735,7 +2735,7 @@ def _maybe_to_slice(loc):
stacklevel=10,
)
- loc = np.arange(start, stop, dtype="int64")
+ loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 3f3f0c68cb1ed..c9ac9cb0f140a 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -239,8 +239,8 @@ def _rebuild_blknos_and_blklocs(self) -> None:
"""
Update mgr._blknos / mgr._blklocs.
"""
- new_blknos = np.empty(self.shape[0], dtype=np.int64)
- new_blklocs = np.empty(self.shape[0], dtype=np.int64)
+ new_blknos = np.empty(self.shape[0], dtype=np.intp)
+ new_blklocs = np.empty(self.shape[0], dtype=np.intp)
new_blknos.fill(-1)
new_blklocs.fill(-1)
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index b6f59807eaa15..60cbe0d94e734 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -51,7 +51,7 @@ def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
- indices = np.array([], dtype=np.int64)
+ indices = np.array([], dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -59,7 +59,7 @@ def test_maybe_indices_to_slice_left_edge(self):
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
- indices = np.arange(0, end, step, dtype=np.int64)
+ indices = np.arange(0, end, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -74,7 +74,7 @@ def test_maybe_indices_to_slice_left_edge(self):
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -87,7 +87,7 @@ def test_maybe_indices_to_slice_right_edge(self):
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
- indices = np.arange(start, 99, step, dtype=np.int64)
+ indices = np.arange(start, 99, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -101,7 +101,7 @@ def test_maybe_indices_to_slice_right_edge(self):
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
- indices = np.array([97, 98, 99, 100], dtype=np.int64)
+ indices = np.array([97, 98, 99, 100], dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -114,7 +114,7 @@ def test_maybe_indices_to_slice_right_edge(self):
with pytest.raises(IndexError, match=msg):
target[maybe_slice]
- indices = np.array([100, 99, 98, 97], dtype=np.int64)
+ indices = np.array([100, 99, 98, 97], dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -126,7 +126,7 @@ def test_maybe_indices_to_slice_right_edge(self):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -138,7 +138,7 @@ def test_maybe_indices_to_slice_both_edges(self):
# slice
for step in [1, 2, 4, 5, 8, 9]:
- indices = np.arange(0, 9, step, dtype=np.int64)
+ indices = np.arange(0, 9, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
@@ -151,7 +151,7 @@ def test_maybe_indices_to_slice_both_edges(self):
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
@@ -163,7 +163,7 @@ def test_maybe_indices_to_slice_middle(self):
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
- indices = np.arange(start, end, step, dtype=np.int64)
+ indices = np.arange(start, end, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -178,7 +178,7 @@ def test_maybe_indices_to_slice_middle(self):
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
| Backport PR #37473: CI: 32 bit maybe_indices_to_slice | https://api.github.com/repos/pandas-dev/pandas/pulls/37503 | 2020-10-29T22:03:15Z | 2020-10-30T10:12:38Z | 2020-10-30T10:12:38Z | 2020-10-30T10:12:38Z |
REGR: revert behaviour of getitem with assigning with a Series | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6cb728800dc68..7d35e8b12b9b8 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -28,6 +28,7 @@ Fixed regressions
- Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`)
- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`)
- Fixed regression in setitem with :meth:`DataFrame.iloc` which raised error when trying to set a value while filtering with a boolean list (:issue:`36741`)
+- Fixed regression in setitem with a Series getting aligned before setting the values (:issue:`37427`)
- Fixed regression in :attr:`MultiIndex.is_monotonic_increasing` returning wrong results with ``NaN`` in at least one of the levels (:issue:`37220`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 379c4ac1c9526..2cd861cc11b28 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1048,10 +1048,8 @@ def _set_with_engine(self, key, value):
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
- # extract_array so that if we set e.g. ser[-5:] = ser[:5]
- # we get the first five values, and not 5 NaNs
indexer = self.index._convert_slice_indexer(key, kind="getitem")
- self.iloc[indexer] = extract_array(value, extract_numpy=True)
+ return self._set_values(indexer, value)
else:
assert not isinstance(key, tuple)
@@ -1069,12 +1067,28 @@ def _set_with(self, key, value):
# should be caught by the is_bool_indexer check in __setitem__
if key_type == "integer":
if not self.index._should_fallback_to_positional():
- self.loc[key] = value
+ self._set_labels(key, value)
else:
- self.iloc[key] = value
+ self._set_values(key, value)
else:
self.loc[key] = value
+ def _set_labels(self, key, value):
+ key = com.asarray_tuplesafe(key)
+ indexer: np.ndarray = self.index.get_indexer(key)
+ mask = indexer == -1
+ if mask.any():
+ raise KeyError(f"{key[mask]} not in index")
+ self._set_values(indexer, value)
+
+ def _set_values(self, key, value):
+ if isinstance(key, Series):
+ key = key._values
+ self._mgr = self._mgr.setitem( # type: ignore[assignment]
+ indexer=key, value=value
+ )
+ self._maybe_update_cacher()
+
def _set_value(self, label, value, takeable: bool = False):
"""
Quickly set single value at passed label.
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 9f6aab823c3ad..cc448279bfce0 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -290,3 +290,13 @@ def test_getitem_multilevel_scalar_slice_not_implemented(
msg = r"\(2000, slice\(3, 4, None\)\)"
with pytest.raises(TypeError, match=msg):
ser[2000, 3:4]
+
+
+def test_getitem_assignment_series_aligment():
+ # https://github.com/pandas-dev/pandas/issues/37427
+ # with getitem, when assigning with a Series, it is not first aligned
+ s = Series(range(10))
+ idx = np.array([2, 4, 9])
+ s[idx] = Series([10, 11, 12])
+ expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12])
+ tm.assert_series_equal(s, expected)
| Closes #37427
@jbrockmendel this is reverting part of the clean-up you did in https://github.com/pandas-dev/pandas/pull/33643. I think the behavioural change was unintentional in that PR. We might still want to do it for 1.2, but then with a deprecation first. | https://api.github.com/repos/pandas-dev/pandas/pulls/37502 | 2020-10-29T22:00:13Z | 2020-10-30T08:14:54Z | 2020-10-30T08:14:54Z | 2020-10-30T08:42:59Z |
Backport PR #37496 on branch 1.1.x (DOC: release date for 1.1.4) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6cb728800dc68..dc0b5d3976489 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -1,7 +1,7 @@
.. _whatsnew_114:
-What's new in 1.1.4 (??)
-------------------------
+What's new in 1.1.4 (October 30, 2020)
+--------------------------------------
These are the changes in pandas 1.1.4. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -44,14 +44,6 @@ Bug fixes
.. ---------------------------------------------------------------------------
-.. _whatsnew_114.other:
-
-Other
-~~~~~
--
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_114.contributors:
Contributors
| Backport PR #37496: DOC: release date for 1.1.4 | https://api.github.com/repos/pandas-dev/pandas/pulls/37500 | 2020-10-29T21:48:21Z | 2020-10-30T10:32:39Z | 2020-10-30T10:32:39Z | 2020-10-30T10:32:39Z |
REGR: fix isin for large series with nan and mixed object dtype (causing regression in read_csv) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6cb728800dc68..277edd15a9322 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :func:`read_csv` raising a ``ValueError`` when ``names`` was of type ``dict_keys`` (:issue:`36928`)
+- Fixed regression in :func:`read_csv` with more than 1M rows and specifying a ``index_col`` argument (:issue:`37094`)
- Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`)
- Fixed regression where :meth:`DataFrame.agg` would fail with :exc:`TypeError` when passed positional arguments to be passed on to the aggregation function (:issue:`36948`).
- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9a3144d1ccbaa..a310ec5312cf4 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -441,7 +441,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
if len(comps) > 1_000_000 and not is_object_dtype(comps):
# If the the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
- if np.isnan(values).any():
+ if isna(values).any():
f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c))
else:
f = np.in1d
diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py
index 4d64f2bf411bd..9c6cad4b41949 100644
--- a/pandas/tests/io/parser/test_index_col.py
+++ b/pandas/tests/io/parser/test_index_col.py
@@ -207,3 +207,18 @@ def test_header_with_index_col(all_parsers):
result = parser.read_csv(StringIO(data), index_col="I11", header=0)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_index_col_large_csv(all_parsers):
+ # https://github.com/pandas-dev/pandas/issues/37094
+ parser = all_parsers
+
+ N = 1_000_001
+ df = DataFrame({"a": range(N), "b": np.random.randn(N)})
+
+ with tm.ensure_clean() as path:
+ df.to_csv(path, index=False)
+ result = parser.read_csv(path, index_col=[0])
+
+ tm.assert_frame_equal(result, df.set_index("a"))
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index 62766c692f4df..86ea2b2f02a4d 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -89,3 +89,13 @@ def test_isin_read_only(self):
result = s.isin(arr)
expected = Series([True, True, True])
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_isin_large_series_mixed_dtypes_and_nan():
+ # https://github.com/pandas-dev/pandas/issues/37094
+ # combination of object dtype for the values and > 1_000_000 elements
+ ser = Series([1, 2, np.nan] * 1_000_000)
+ result = ser.isin({"foo", "bar"})
+ expected = Series([False] * 3 * 1_000_000)
+ tm.assert_series_equal(result, expected)
| Closes #37094
| https://api.github.com/repos/pandas-dev/pandas/pulls/37499 | 2020-10-29T21:33:13Z | 2020-10-30T10:51:29Z | 2020-10-30T10:51:28Z | 2020-10-30T11:02:59Z |
TST: tz_localize/tz_convert unclear test assertions | diff --git a/pandas/tests/frame/methods/test_tz_convert.py b/pandas/tests/frame/methods/test_tz_convert.py
index d2ab7a386a92d..c70e479723644 100644
--- a/pandas/tests/frame/methods/test_tz_convert.py
+++ b/pandas/tests/frame/methods/test_tz_convert.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Index, MultiIndex, date_range
+from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
@@ -88,3 +88,19 @@ def test_tz_convert_and_localize(self, fn):
with pytest.raises(ValueError, match="not valid"):
df = DataFrame(index=l0)
df = getattr(df, fn)("US/Pacific", level=1)
+
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ @pytest.mark.parametrize("copy", [True, False])
+ def test_tz_convert_copy_inplace_mutate(self, copy, klass):
+ # GH#6326
+ obj = klass(
+ np.arange(0, 5),
+ index=date_range("20131027", periods=5, freq="1H", tz="Europe/Berlin"),
+ )
+ orig = obj.copy()
+ result = obj.tz_convert("UTC", copy=copy)
+ expected = klass(np.arange(0, 5), index=obj.index.tz_convert("UTC"))
+ tm.assert_equal(result, expected)
+ tm.assert_equal(obj, orig)
+ assert result.index is not obj.index
+ assert result is not obj
diff --git a/pandas/tests/frame/methods/test_tz_localize.py b/pandas/tests/frame/methods/test_tz_localize.py
index 1d4e26a6999b7..183b81ca5298e 100644
--- a/pandas/tests/frame/methods/test_tz_localize.py
+++ b/pandas/tests/frame/methods/test_tz_localize.py
@@ -1,4 +1,7 @@
-from pandas import DataFrame, date_range
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Series, date_range
import pandas._testing as tm
@@ -19,3 +22,21 @@ def test_frame_tz_localize(self):
result = df.tz_localize("utc", axis=1)
assert result.columns.tz.zone == "UTC"
tm.assert_frame_equal(result, expected.T)
+
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ @pytest.mark.parametrize("copy", [True, False])
+ def test_tz_localize_copy_inplace_mutate(self, copy, klass):
+ # GH#6326
+ obj = klass(
+ np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=None)
+ )
+ orig = obj.copy()
+ result = obj.tz_localize("UTC", copy=copy)
+ expected = klass(
+ np.arange(0, 5),
+ index=date_range("20131027", periods=5, freq="1H", tz="UTC"),
+ )
+ tm.assert_equal(result, expected)
+ tm.assert_equal(obj, orig)
+ assert result.index is not obj.index
+ assert result is not obj
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index a2fab5c7e0f0e..bb091ba1beb2d 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
from datetime import datetime, timedelta
+from dateutil.tz import tzoffset
import numpy as np
import numpy.ma as ma
import pytest
@@ -1588,6 +1589,20 @@ def test_constructor_dict_timedelta_index(self):
)
tm.assert_series_equal(result, expected)
+ def test_constructor_infer_index_tz(self):
+ values = [188.5, 328.25]
+ tzinfo = tzoffset(None, 7200)
+ index = [
+ datetime(2012, 5, 11, 11, tzinfo=tzinfo),
+ datetime(2012, 5, 11, 12, tzinfo=tzinfo),
+ ]
+ series = Series(data=values, index=index)
+
+ assert series.index.tz == tzinfo
+
+ # it works! GH#2443
+ repr(series.index[0])
+
class TestSeriesConstructorIndexCoercion:
def test_series_constructor_datetimelike_index_coercion(self):
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py
deleted file mode 100644
index 05792dc4f00d2..0000000000000
--- a/pandas/tests/series/test_timezones.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Tests for Series timezone-related methods
-"""
-from datetime import datetime
-
-from dateutil.tz import tzoffset
-import numpy as np
-import pytest
-
-from pandas import Series
-import pandas._testing as tm
-from pandas.core.indexes.datetimes import date_range
-
-
-class TestSeriesTimezones:
- def test_dateutil_tzoffset_support(self):
- values = [188.5, 328.25]
- tzinfo = tzoffset(None, 7200)
- index = [
- datetime(2012, 5, 11, 11, tzinfo=tzinfo),
- datetime(2012, 5, 11, 12, tzinfo=tzinfo),
- ]
- series = Series(data=values, index=index)
-
- assert series.index.tz == tzinfo
-
- # it works! #2443
- repr(series.index[0])
-
- @pytest.mark.parametrize("copy", [True, False])
- @pytest.mark.parametrize(
- "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]]
- )
- def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz):
- # GH 6326
- result = Series(
- np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
- )
- getattr(result, method)("UTC", copy=copy)
- expected = Series(
- np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
- )
- tm.assert_series_equal(result, expected)
| kill off tests.series.test_timezones. | https://api.github.com/repos/pandas-dev/pandas/pulls/37498 | 2020-10-29T21:21:54Z | 2020-10-30T13:21:14Z | 2020-10-30T13:21:14Z | 2020-10-30T14:53:02Z |
DOC: release date for 1.1.4 | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 6cb728800dc68..dc0b5d3976489 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -1,7 +1,7 @@
.. _whatsnew_114:
-What's new in 1.1.4 (??)
-------------------------
+What's new in 1.1.4 (October 30, 2020)
+--------------------------------------
These are the changes in pandas 1.1.4. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -44,14 +44,6 @@ Bug fixes
.. ---------------------------------------------------------------------------
-.. _whatsnew_114.other:
-
-Other
-~~~~~
--
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_114.contributors:
Contributors
| https://api.github.com/repos/pandas-dev/pandas/pulls/37496 | 2020-10-29T20:52:07Z | 2020-10-29T21:47:47Z | 2020-10-29T21:47:47Z | 2020-10-29T22:00:51Z | |
BUG: Series[uintarray] failing to raise KeyError | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 812af544ed9d8..312a850ec3fd1 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -439,6 +439,7 @@ Indexing
- Bug in :meth:`DataFrame.__getitem__` and :meth:`DataFrame.loc.__getitem__` with :class:`IntervalIndex` columns and a numeric indexer (:issue:`26490`)
- Bug in :meth:`Series.loc.__getitem__` with a non-unique :class:`MultiIndex` and an empty-list indexer (:issue:`13691`)
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` with a level named "0" (:issue:`37194`)
+- Bug in :meth:`Series.__getitem__` when using an unsigned integer array as an indexer giving incorrect results or segfaulting instead of raising ``KeyError`` (:issue:`37218`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 74ec892d5f8a0..3afcb0fa343d5 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -17,9 +17,9 @@
ensure_python_int,
is_float,
is_integer,
- is_integer_dtype,
is_list_like,
is_scalar,
+ is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
@@ -369,7 +369,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
start, stop, step = reverse.start, reverse.stop, reverse.step
target_array = np.asarray(target)
- if not (is_integer_dtype(target_array) and target_array.ndim == 1):
+ if not (is_signed_integer_dtype(target_array) and target_array.ndim == 1):
# checks/conversions/roundings are delegated to general method
return super().get_indexer(target, method=method, tolerance=tolerance)
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 9f6aab823c3ad..ab34423fa7acd 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -147,6 +147,16 @@ def test_getitem_intlist_multiindex_numeric_level(self, dtype, box):
with pytest.raises(KeyError, match="5"):
ser[key]
+ def test_getitem_uint_array_key(self, uint_dtype):
+ # GH #37218
+ ser = Series([1, 2, 3])
+ key = np.array([4], dtype=uint_dtype)
+
+ with pytest.raises(KeyError, match="4"):
+ ser[key]
+ with pytest.raises(KeyError, match="4"):
+ ser.loc[key]
+
class TestGetitemBooleanMask:
def test_getitem_boolean(self, string_series):
| - [x] closes #37218
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37495 | 2020-10-29T20:24:18Z | 2020-10-30T22:45:57Z | 2020-10-30T22:45:57Z | 2020-10-30T22:49:43Z |
CLN: unify numpy.random-related imports | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 728ffc6f85ba4..3e16ec134db46 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -6,7 +6,6 @@
import warnings
import numpy as np
-from numpy.random import rand, randint, randn
import pytest
from pandas.compat import is_platform_windows
@@ -126,15 +125,15 @@ def _is_py3_complex_incompat(result, expected):
@pytest.fixture(params=list(range(5)))
def lhs(request):
- nan_df1 = DataFrame(rand(10, 5))
+ nan_df1 = DataFrame(np.random.rand(10, 5))
nan_df1[nan_df1 > 0.5] = np.nan
opts = (
- DataFrame(randn(10, 5)),
- Series(randn(5)),
+ DataFrame(np.random.randn(10, 5)),
+ Series(np.random.randn(5)),
Series([1, 2, np.nan, np.nan, 5]),
nan_df1,
- randn(),
+ np.random.randn(),
)
return opts[request.param]
@@ -455,7 +454,7 @@ def test_frame_invert(self):
# ~ ##
# frame
# float always raises
- lhs = DataFrame(randn(5, 2))
+ lhs = DataFrame(np.random.randn(5, 2))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert_dd'"
with pytest.raises(NotImplementedError, match=msg):
@@ -466,7 +465,7 @@ def test_frame_invert(self):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
# int raises on numexpr
- lhs = DataFrame(randint(5, size=(5, 2)))
+ lhs = DataFrame(np.random.randint(5, size=(5, 2)))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert"
with pytest.raises(NotImplementedError, match=msg):
@@ -477,13 +476,13 @@ def test_frame_invert(self):
tm.assert_frame_equal(expect, result)
# bool always works
- lhs = DataFrame(rand(5, 2) > 0.5)
+ lhs = DataFrame(np.random.rand(5, 2) > 0.5)
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# object raises
- lhs = DataFrame({"b": ["a", 1, 2.0], "c": rand(3) > 0.5})
+ lhs = DataFrame({"b": ["a", 1, 2.0], "c": np.random.rand(3) > 0.5})
if self.engine == "numexpr":
with pytest.raises(ValueError, match="unknown type object"):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
@@ -498,7 +497,7 @@ def test_series_invert(self):
# series
# float raises
- lhs = Series(randn(5))
+ lhs = Series(np.random.randn(5))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert_dd'"
with pytest.raises(NotImplementedError, match=msg):
@@ -509,7 +508,7 @@ def test_series_invert(self):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
# int raises on numexpr
- lhs = Series(randint(5, size=5))
+ lhs = Series(np.random.randint(5, size=5))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert"
with pytest.raises(NotImplementedError, match=msg):
@@ -520,7 +519,7 @@ def test_series_invert(self):
tm.assert_series_equal(expect, result)
# bool
- lhs = Series(rand(5) > 0.5)
+ lhs = Series(np.random.rand(5) > 0.5)
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
@@ -543,19 +542,19 @@ def test_frame_negate(self):
expr = self.ex("-")
# float
- lhs = DataFrame(randn(5, 2))
+ lhs = DataFrame(np.random.randn(5, 2))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# int
- lhs = DataFrame(randint(5, size=(5, 2)))
+ lhs = DataFrame(np.random.randint(5, size=(5, 2)))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
- lhs = DataFrame(rand(5, 2) > 0.5)
+ lhs = DataFrame(np.random.rand(5, 2) > 0.5)
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'neg_bb'"
with pytest.raises(NotImplementedError, match=msg):
@@ -569,19 +568,19 @@ def test_series_negate(self):
expr = self.ex("-")
# float
- lhs = Series(randn(5))
+ lhs = Series(np.random.randn(5))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
# int
- lhs = Series(randint(5, size=5))
+ lhs = Series(np.random.randint(5, size=5))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
- lhs = Series(rand(5) > 0.5)
+ lhs = Series(np.random.rand(5) > 0.5)
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'neg_bb'"
with pytest.raises(NotImplementedError, match=msg):
@@ -595,11 +594,11 @@ def test_series_negate(self):
"lhs",
[
# Float
- DataFrame(randn(5, 2)),
+ DataFrame(np.random.randn(5, 2)),
# Int
- DataFrame(randint(5, size=(5, 2))),
+ DataFrame(np.random.randint(5, size=(5, 2))),
# bool doesn't work with numexpr but works elsewhere
- DataFrame(rand(5, 2) > 0.5),
+ DataFrame(np.random.rand(5, 2) > 0.5),
],
)
def test_frame_pos(self, lhs):
@@ -613,11 +612,11 @@ def test_frame_pos(self, lhs):
"lhs",
[
# Float
- Series(randn(5)),
+ Series(np.random.randn(5)),
# Int
- Series(randint(5, size=5)),
+ Series(np.random.randint(5, size=5)),
# bool doesn't work with numexpr but works elsewhere
- Series(rand(5) > 0.5),
+ Series(np.random.rand(5) > 0.5),
],
)
def test_series_pos(self, lhs):
@@ -688,7 +687,7 @@ def test_disallow_scalar_bool_ops(self):
exprs += ("2 * x > 2 or 1 and 2",)
exprs += ("2 * df > 3 and 1 or a",)
- x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2)) # noqa
+ x, a, b, df = np.random.randn(3), 1, 2, DataFrame(np.random.randn(3, 2)) # noqa
for ex in exprs:
msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
with pytest.raises(NotImplementedError, match=msg):
@@ -909,7 +908,7 @@ def test_frame_comparison(self, engine, parser, r_idx_type, c_idx_type):
res = pd.eval("df < 2", engine=engine, parser=parser)
tm.assert_frame_equal(res, df < 2)
- df3 = DataFrame(randn(*df.shape), index=df.index, columns=df.columns)
+ df3 = DataFrame(np.random.randn(*df.shape), index=df.index, columns=df.columns)
res = pd.eval("df < df3", engine=engine, parser=parser)
tm.assert_frame_equal(res, df < df3)
@@ -1089,8 +1088,8 @@ def test_complex_series_frame_alignment(self, engine, parser, r1, c1, r2, c2):
tm.assert_frame_equal(res, expected)
def test_performance_warning_for_poor_alignment(self, engine, parser):
- df = DataFrame(randn(1000, 10))
- s = Series(randn(10000))
+ df = DataFrame(np.random.randn(1000, 10))
+ s = Series(np.random.randn(10000))
if engine == "numexpr":
seen = PerformanceWarning
else:
@@ -1099,17 +1098,17 @@ def test_performance_warning_for_poor_alignment(self, engine, parser):
with tm.assert_produces_warning(seen):
pd.eval("df + s", engine=engine, parser=parser)
- s = Series(randn(1000))
+ s = Series(np.random.randn(1000))
with tm.assert_produces_warning(False):
pd.eval("df + s", engine=engine, parser=parser)
- df = DataFrame(randn(10, 10000))
- s = Series(randn(10000))
+ df = DataFrame(np.random.randn(10, 10000))
+ s = Series(np.random.randn(10000))
with tm.assert_produces_warning(False):
pd.eval("df + s", engine=engine, parser=parser)
- df = DataFrame(randn(10, 10))
- s = Series(randn(10000))
+ df = DataFrame(np.random.randn(10, 10))
+ s = Series(np.random.randn(10000))
is_python_engine = engine == "python"
@@ -1206,8 +1205,8 @@ def test_bool_ops_with_constants(self, rhs, lhs, op):
assert res == exp
def test_4d_ndarray_fails(self):
- x = randn(3, 4, 5, 6)
- y = Series(randn(10))
+ x = np.random.randn(3, 4, 5, 6)
+ y = Series(np.random.randn(10))
msg = "N-dimensional objects, where N > 2, are not supported with eval"
with pytest.raises(NotImplementedError, match=msg):
self.eval("x + y", local_dict={"x": x, "y": y})
@@ -1217,7 +1216,7 @@ def test_constant(self):
assert x == 1
def test_single_variable(self):
- df = DataFrame(randn(10, 2))
+ df = DataFrame(np.random.randn(10, 2))
df2 = self.eval("df", local_dict={"df": df})
tm.assert_frame_equal(df, df2)
@@ -1574,7 +1573,7 @@ def test_nested_period_index_subscript_expression(self):
tm.assert_frame_equal(r, e)
def test_date_boolean(self):
- df = DataFrame(randn(5, 3))
+ df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
res = self.eval(
"df.dates1 < 20130101",
@@ -1859,7 +1858,7 @@ class TestMathNumExprPython(TestMathPythonPython):
parser = "python"
-_var_s = randn(10)
+_var_s = np.random.randn(10)
class TestScope:
diff --git a/pandas/tests/indexing/multiindex/test_insert.py b/pandas/tests/indexing/multiindex/test_insert.py
index 42922c3deeee4..9f5ad90d36e03 100644
--- a/pandas/tests/indexing/multiindex/test_insert.py
+++ b/pandas/tests/indexing/multiindex/test_insert.py
@@ -1,4 +1,4 @@
-from numpy.random import randn
+import numpy as np
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
@@ -14,7 +14,7 @@ def test_setitem_mixed_depth(self):
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
- df = DataFrame(randn(4, 6), columns=index)
+ df = DataFrame(np.random.randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index b58b81d5aa1b3..059f8543104a7 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
import pandas as pd
@@ -311,7 +310,9 @@ def test_frame_getitem_setitem_multislice(self):
tm.assert_frame_equal(df, result)
def test_frame_setitem_multi_column(self):
- df = DataFrame(randn(10, 4), columns=[["a", "a", "b", "b"], [0, 1, 0, 1]])
+ df = DataFrame(
+ np.random.randn(10, 4), columns=[["a", "a", "b", "b"], [0, 1, 0, 1]]
+ )
cp = df.copy()
cp["a"] = cp["b"]
diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py
index bafe5068e1418..8a013c769f2cc 100644
--- a/pandas/tests/indexing/multiindex/test_sorted.py
+++ b/pandas/tests/indexing/multiindex/test_sorted.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
from pandas import DataFrame, MultiIndex, Series
@@ -115,7 +114,7 @@ def test_series_getitem_not_sorted(self):
]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
- s = Series(randn(8), index=index)
+ s = Series(np.random.randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index a454d3b855cdf..569bc8a04862e 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -1,7 +1,6 @@
from textwrap import dedent
import numpy as np
-from numpy.random import randint
import pytest
import pandas as pd
@@ -54,7 +53,7 @@ def df(request):
return tm.makeCustomDataframe(
max_rows + 1,
3,
- data_gen_f=lambda *args: randint(2),
+ data_gen_f=lambda *args: np.random.randint(2),
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
@@ -95,7 +94,7 @@ def df(request):
return tm.makeCustomDataframe(
5,
3,
- data_gen_f=lambda *args: randint(2),
+ data_gen_f=lambda *args: np.random.randint(2),
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 59034e9f3d807..f929d4ac31484 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -7,7 +7,6 @@
from urllib.error import URLError
import numpy as np
-from numpy.random import rand
import pytest
from pandas.compat import is_platform_windows
@@ -110,7 +109,7 @@ def test_to_html_compat(self):
tm.makeCustomDataframe(
4,
3,
- data_gen_f=lambda *args: rand(),
+ data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 2a6bd97c93b8e..c04b70f3c2953 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -2,7 +2,6 @@
import warnings
import numpy as np
-from numpy import random
from pandas.util._decorators import cache_readonly
import pandas.util._test_decorators as td
@@ -50,11 +49,11 @@ def setup_method(self, method):
{
"gender": gender,
"classroom": classroom,
- "height": random.normal(66, 4, size=n),
- "weight": random.normal(161, 32, size=n),
- "category": random.randint(4, size=n),
+ "height": np.random.normal(66, 4, size=n),
+ "weight": np.random.normal(161, 32, size=n),
+ "category": np.random.randint(4, size=n),
"datetime": to_datetime(
- random.randint(
+ np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=n,
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 0a096acc9fa6d..dc2e9e1e8d15f 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -2,7 +2,6 @@
import string
import numpy as np
-from numpy import random
import pytest
import pandas.util._test_decorators as td
@@ -186,7 +185,7 @@ def test_boxplot_numeric_data(self):
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
- df = DataFrame(random.rand(10, 2))
+ df = DataFrame(np.random.rand(10, 2))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@@ -197,7 +196,7 @@ def test_color_kwd(self, colors_kwd, expected):
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
- df = DataFrame(random.rand(10, 2))
+ df = DataFrame(np.random.rand(10, 2))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@@ -293,7 +292,7 @@ def test_grouped_box_return_type(self):
self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
columns2 = "X B C D A G Y N Q O".split()
- df2 = DataFrame(random.randn(50, 10), columns=columns2)
+ df2 = DataFrame(np.random.randn(50, 10), columns=columns2)
categories2 = "A B C D E F G H I J".split()
df2["category"] = categories2 * 5
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index ba59fc1a3cc3f..e6a61d35365a3 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -6,7 +6,6 @@
import warnings
import numpy as np
-from numpy.random import rand, randn
import pytest
import pandas.util._test_decorators as td
@@ -170,7 +169,7 @@ def test_integer_array_plot(self):
def test_mpl2_color_cycle_str(self):
# GH 15516
- df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
+ df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", "MatplotlibDeprecationWarning")
@@ -198,7 +197,7 @@ def test_rgb_tuple_color(self):
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
- df = DataFrame(randn(10, 2))
+ df = DataFrame(np.random.randn(10, 2))
with pytest.raises(ValueError):
df.plot(color="")
@@ -243,14 +242,14 @@ def test_nonnumeric_exclude(self):
@pytest.mark.slow
def test_implicit_label(self):
- df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
+ df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
ax = df.plot(x="a", y="b")
self._check_text_labels(ax.xaxis.get_label(), "a")
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
- df = DataFrame(randn(2, 2), columns=["a", "b"])
+ df = DataFrame(np.random.randn(2, 2), columns=["a", "b"])
df.index.name = "NAME"
df.plot(y="b", label="LABEL")
assert df.index.name == "NAME"
@@ -813,7 +812,7 @@ def test_subplots_dup_columns(self):
def test_negative_log(self):
df = -DataFrame(
- rand(6, 4),
+ np.random.rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
@@ -832,15 +831,20 @@ def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
def test_line_area_stacked(self):
with tm.RNGContext(42):
- df = DataFrame(rand(6, 4), columns=["w", "x", "y", "z"])
+ df = DataFrame(np.random.rand(6, 4), columns=["w", "x", "y", "z"])
neg_df = -df
# each column has either positive or negative value
sep_df = DataFrame(
- {"w": rand(6), "x": rand(6), "y": -rand(6), "z": -rand(6)}
+ {
+ "w": np.random.rand(6),
+ "x": np.random.rand(6),
+ "y": -np.random.rand(6),
+ "z": -np.random.rand(6),
+ }
)
# each column has positive-negative mixed value
mixed_df = DataFrame(
- randn(6, 4),
+ np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["w", "x", "y", "z"],
)
@@ -908,7 +912,7 @@ def test_line_area_nan_df(self):
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
- df = DataFrame(rand(6, 3), columns=["x", "y", "z"])
+ df = DataFrame(np.random.rand(6, 3), columns=["x", "y", "z"])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
@@ -932,7 +936,7 @@ def test_line_lim(self):
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
- df = DataFrame(rand(6, 4), columns=["x", "y", "z", "four"])
+ df = DataFrame(np.random.rand(6, 4), columns=["x", "y", "z", "four"])
neg_df = -df
for stacked in [True, False]:
@@ -954,7 +958,7 @@ def test_bar_colors(self):
default_colors = self._unpack_cycler(plt.rcParams)
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
@@ -1004,7 +1008,7 @@ def test_bar_user_colors(self):
@pytest.mark.slow
def test_bar_linewidth(self):
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
@@ -1025,7 +1029,7 @@ def test_bar_linewidth(self):
@pytest.mark.slow
def test_bar_barwidth(self):
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
width = 0.9
@@ -1063,7 +1067,7 @@ def test_bar_barwidth(self):
@pytest.mark.slow
def test_bar_barwidth_position(self):
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
self._check_bar_alignment(
df, kind="bar", stacked=False, width=0.9, position=0.2
)
@@ -1084,7 +1088,7 @@ def test_bar_barwidth_position(self):
@pytest.mark.slow
def test_bar_barwidth_position_int(self):
# GH 12979
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
for w in [1, 1.0]:
ax = df.plot.bar(stacked=True, width=w)
@@ -1103,7 +1107,7 @@ def test_bar_barwidth_position_int(self):
@pytest.mark.slow
def test_bar_bottom_left(self):
- df = DataFrame(rand(5, 5))
+ df = DataFrame(np.random.rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
@@ -1179,7 +1183,7 @@ def test_bar_categorical(self):
@pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(
- randn(6, 4),
+ np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
@@ -1291,7 +1295,7 @@ def test_plot_scatter_with_categorical_data(self, x, y):
@pytest.mark.slow
def test_plot_scatter_with_c(self):
df = DataFrame(
- randn(6, 4),
+ np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
@@ -1394,7 +1398,7 @@ def test_scatter_colorbar_different_cmap(self):
@pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(
- randn(6, 4),
+ np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
@@ -1407,7 +1411,9 @@ def test_plot_bar(self):
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(
- randn(10, 15), index=list(string.ascii_letters[:10]), columns=range(15)
+ np.random.randn(10, 15),
+ index=list(string.ascii_letters[:10]),
+ columns=range(15),
)
_check_plot_works(df.plot.bar)
@@ -1523,7 +1529,7 @@ def test_bar_subplots_center(self):
@pytest.mark.slow
def test_bar_align_single_column(self):
- df = DataFrame(randn(5))
+ df = DataFrame(np.random.randn(5))
self._check_bar_alignment(df, kind="bar", stacked=False)
self._check_bar_alignment(df, kind="bar", stacked=True)
self._check_bar_alignment(df, kind="barh", stacked=False)
@@ -1641,7 +1647,7 @@ def test_boxplot_vertical(self):
@pytest.mark.slow
def test_boxplot_return_type(self):
df = DataFrame(
- randn(6, 4),
+ np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
@@ -1683,7 +1689,7 @@ def test_boxplot_subplots_return_type(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
- df = DataFrame(randn(100, 4))
+ df = DataFrame(np.random.randn(100, 4))
ax = _check_plot_works(df.plot, kind="kde")
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
@@ -1710,7 +1716,7 @@ def test_kde_missing_vals(self):
def test_hist_df(self):
from matplotlib.patches import Rectangle
- df = DataFrame(randn(100, 4))
+ df = DataFrame(np.random.randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
@@ -1918,16 +1924,16 @@ def test_hist_df_coord(self):
@pytest.mark.slow
def test_plot_int_columns(self):
- df = DataFrame(randn(100, 4)).cumsum()
+ df = DataFrame(np.random.randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@pytest.mark.slow
def test_df_legend_labels(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
- df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
- df2 = DataFrame(rand(3, 3), columns=["d", "e", "f"])
- df3 = DataFrame(rand(3, 3), columns=["g", "h", "i"])
- df4 = DataFrame(rand(3, 3), columns=["j", "k", "l"])
+ df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
+ df2 = DataFrame(np.random.rand(3, 3), columns=["d", "e", "f"])
+ df3 = DataFrame(np.random.rand(3, 3), columns=["g", "h", "i"])
+ df4 = DataFrame(np.random.rand(3, 3), columns=["j", "k", "l"])
for kind in kinds:
@@ -1956,9 +1962,9 @@ def test_df_legend_labels(self):
# Time Series
ind = date_range("1/1/2014", periods=3)
- df = DataFrame(randn(3, 3), columns=["a", "b", "c"], index=ind)
- df2 = DataFrame(randn(3, 3), columns=["d", "e", "f"], index=ind)
- df3 = DataFrame(randn(3, 3), columns=["g", "h", "i"], index=ind)
+ df = DataFrame(np.random.randn(3, 3), columns=["a", "b", "c"], index=ind)
+ df2 = DataFrame(np.random.randn(3, 3), columns=["d", "e", "f"], index=ind)
+ df3 = DataFrame(np.random.randn(3, 3), columns=["g", "h", "i"], index=ind)
ax = df.plot(legend=True, secondary_y="b")
self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
ax = df2.plot(legend=False, ax=ax)
@@ -2012,7 +2018,7 @@ def test_missing_marker_multi_plots_on_same_ax(self):
def test_legend_name(self):
multi = DataFrame(
- randn(4, 4),
+ np.random.randn(4, 4),
columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])],
)
multi.columns.names = ["group", "individual"]
@@ -2021,7 +2027,7 @@ def test_legend_name(self):
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, "group,individual")
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, "group,individual")
@@ -2038,7 +2044,7 @@ def test_legend_name(self):
@pytest.mark.slow
def test_no_legend(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
- df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
+ df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
for kind in kinds:
@@ -2051,7 +2057,7 @@ def test_style_by_column(self):
fig = plt.gcf()
- df = DataFrame(randn(100, 3))
+ df = DataFrame(np.random.randn(100, 3))
for markers in [
{0: "^", 1: "+", 2: "o"},
{0: "^", 1: "+"},
@@ -2078,7 +2084,7 @@ def test_line_colors(self):
from matplotlib import cm
custom_colors = "rgcby"
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
@@ -2131,7 +2137,7 @@ def test_line_colors_and_styles_subplots(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
@@ -2200,7 +2206,7 @@ def test_area_colors(self):
from matplotlib.collections import PolyCollection
custom_colors = "rgcby"
- df = DataFrame(rand(5, 5))
+ df = DataFrame(np.random.rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
@@ -2243,7 +2249,7 @@ def test_area_colors(self):
def test_hist_colors(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
@@ -2280,7 +2286,7 @@ def test_kde_colors(self):
from matplotlib import cm
custom_colors = "rgcby"
- df = DataFrame(rand(5, 5))
+ df = DataFrame(np.random.rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
@@ -2302,7 +2308,7 @@ def test_kde_colors_and_styles_subplots(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
axes = df.plot(kind="kde", subplots=True)
for ax, c in zip(axes, list(default_colors)):
@@ -2370,7 +2376,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
default_colors = self._unpack_cycler(self.plt.rcParams)
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
bp = df.plot.box(return_type="dict")
_check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
tm.close()
@@ -2444,7 +2450,7 @@ def test_default_color_cycle(self):
colors = list("rgbk")
plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
- df = DataFrame(randn(5, 3))
+ df = DataFrame(np.random.randn(5, 3))
ax = df.plot()
expected = self._unpack_cycler(plt.rcParams)[:3]
@@ -2484,7 +2490,7 @@ def test_all_invalid_plot_data(self):
@pytest.mark.slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
- df = DataFrame(randn(10, 2), dtype=object)
+ df = DataFrame(np.random.randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = "a"
for kind in plotting.PlotAccessor._common_kinds:
@@ -2495,14 +2501,14 @@ def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ["area"]
- df = DataFrame(rand(10, 2), dtype=object)
+ df = DataFrame(np.random.rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = "a"
for kind in kinds:
with pytest.raises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
- df = DataFrame(randn(10, 2))
+ df = DataFrame(np.random.randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind="aasdf")
@@ -3208,7 +3214,7 @@ def test_df_grid_settings(self):
)
def test_invalid_colormap(self):
- df = DataFrame(randn(3, 2), columns=["A", "B"])
+ df = DataFrame(np.random.randn(3, 2), columns=["A", "B"])
with pytest.raises(ValueError):
df.plot(colormap="invalid_colormap")
@@ -3219,11 +3225,11 @@ def test_plain_axes(self):
# a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
- Series(rand(10)).plot(ax=ax)
+ Series(np.random.rand(10)).plot(ax=ax)
# supplied ax itself is a plain Axes, but because the cmap keyword
# a new ax is created for the colorbar -> also multiples axes (GH11520)
- df = DataFrame({"a": randn(8), "b": randn(8)})
+ df = DataFrame({"a": np.random.randn(8), "b": np.random.randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv")
@@ -3234,15 +3240,15 @@ def test_plain_axes(self):
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
- Series(rand(10)).plot(ax=ax)
- Series(rand(10)).plot(ax=cax)
+ Series(np.random.rand(10)).plot(ax=ax)
+ Series(np.random.rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1.0, loc=3)
- Series(rand(10)).plot(ax=ax)
- Series(rand(10)).plot(ax=iax)
+ Series(np.random.rand(10)).plot(ax=ax)
+ Series(np.random.rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index d9a58e808661b..9775218e0dbf6 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -1,7 +1,6 @@
""" Test cases for .hist method """
import numpy as np
-from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
@@ -103,8 +102,8 @@ def test_hist_layout_with_by(self):
def test_hist_no_overlap(self):
from matplotlib.pyplot import gcf, subplot
- x = Series(randn(2))
- y = Series(randn(2))
+ x = Series(np.random.randn(2))
+ y = Series(np.random.randn(2))
subplot(121)
x.hist()
subplot(122)
@@ -163,7 +162,7 @@ def test_hist_df_legacy(self):
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
- df = DataFrame(randn(100, 2))
+ df = DataFrame(np.random.randn(100, 2))
df[2] = to_datetime(
np.random.randint(
self.start_date_to_int64,
@@ -178,11 +177,11 @@ def test_hist_df_legacy(self):
assert not axes[1, 1].get_visible()
_check_plot_works(df[[2]].hist)
- df = DataFrame(randn(100, 1))
+ df = DataFrame(np.random.randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
- df = DataFrame(randn(100, 5))
+ df = DataFrame(np.random.randn(100, 5))
df[5] = to_datetime(
np.random.randint(
self.start_date_to_int64,
@@ -269,7 +268,7 @@ def test_hist_non_numerical_or_datetime_raises(self):
@pytest.mark.slow
def test_hist_layout(self):
- df = DataFrame(randn(100, 2))
+ df = DataFrame(np.random.randn(100, 2))
df[2] = to_datetime(
np.random.randint(
self.start_date_to_int64,
@@ -404,7 +403,7 @@ def test_grouped_hist_legacy(self):
from pandas.plotting._matplotlib.hist import _grouped_hist
- df = DataFrame(randn(500, 1), columns=["A"])
+ df = DataFrame(np.random.randn(500, 1), columns=["A"])
df["B"] = to_datetime(
np.random.randint(
self.start_date_to_int64,
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 2838bef2a10b0..f37d83cd0783e 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -1,8 +1,6 @@
""" Test cases for misc plot functions """
import numpy as np
-from numpy import random
-from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
@@ -101,7 +99,7 @@ def test_scatter_matrix_axis(self):
scatter_matrix = plotting.scatter_matrix
with tm.RNGContext(42):
- df = DataFrame(randn(100, 3))
+ df = DataFrame(np.random.randn(100, 3))
# we are plotting multiples on a sub-plot
with tm.assert_produces_warning(
@@ -166,9 +164,9 @@ def test_andrews_curves(self, iris):
length = 10
df = DataFrame(
{
- "A": random.rand(length),
- "B": random.rand(length),
- "C": random.rand(length),
+ "A": np.random.rand(length),
+ "B": np.random.rand(length),
+ "C": np.random.rand(length),
"Name": ["A"] * length,
}
)
@@ -353,11 +351,11 @@ def test_get_standard_colors_random_seed(self):
# GH17525
df = DataFrame(np.zeros((10, 10)))
- # Make sure that the random seed isn't reset by get_standard_colors
+ # Make sure that the random seed isn't reset by get_standard_colors
plotting.parallel_coordinates(df, 0)
- rand1 = random.random()
+ rand1 = np.random.random()
plotting.parallel_coordinates(df, 0)
- rand2 = random.random()
+ rand2 = np.random.random()
assert rand1 != rand2
# Make sure it produces the same colors every time it's called
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 271cf65433afe..777bc914069a6 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -5,7 +5,6 @@
from itertools import chain
import numpy as np
-from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
@@ -59,7 +58,7 @@ def test_plot(self):
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
- ax = _check_plot_works(Series(randn(10)).plot.bar, color="black")
+ ax = _check_plot_works(Series(np.random.randn(10)).plot.bar, color="black")
self._check_colors([ax.patches[0]], facecolors=["black"])
# GH 6951
@@ -267,7 +266,7 @@ def test_bar_user_colors(self):
assert result == expected
def test_rotation(self):
- df = DataFrame(randn(5, 5))
+ df = DataFrame(np.random.randn(5, 5))
# Default rot 0
_, ax = self.plt.subplots()
axes = df.plot(ax=ax)
@@ -282,7 +281,7 @@ def test_irregular_datetime(self):
rng = date_range("1/1/2000", "3/1/2000")
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
- ser = Series(randn(len(rng)), rng)
+ ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ax = ser.plot(ax=ax)
xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax)
@@ -459,8 +458,8 @@ def test_hist_layout_with_by(self):
def test_hist_no_overlap(self):
from matplotlib.pyplot import gcf, subplot
- x = Series(randn(2))
- y = Series(randn(2))
+ x = Series(np.random.randn(2))
+ y = Series(np.random.randn(2))
subplot(121)
x.hist()
subplot(122)
@@ -590,7 +589,7 @@ def test_secondary_logy(self, input_logy, expected_scale):
@pytest.mark.slow
def test_plot_fails_with_dupe_color_and_style(self):
- x = Series(randn(2))
+ x = Series(np.random.randn(2))
with pytest.raises(ValueError):
_, ax = self.plt.subplots()
x.plot(style="k--", color="k", ax=ax)
@@ -734,7 +733,7 @@ def test_dup_datetime_index_plot(self):
dr1 = date_range("1/1/2009", periods=4)
dr2 = date_range("1/2/2009", periods=4)
index = dr1.append(dr2)
- values = randn(index.size)
+ values = np.random.randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@@ -763,7 +762,7 @@ def test_errorbar_plot(self):
s = Series(np.arange(10), name="x")
s_err = np.random.randn(10)
- d_err = DataFrame(randn(10, 2), index=s.index, columns=["x", "y"])
+ d_err = DataFrame(np.random.randn(10, 2), index=s.index, columns=["x", "y"])
# test line and bar plots
kinds = ["line", "bar"]
for kind in kinds:
@@ -785,7 +784,7 @@ def test_errorbar_plot(self):
ix = date_range("1/1/2000", "1/1/2001", freq="M")
ts = Series(np.arange(12), index=ix, name="x")
ts_err = Series(np.random.randn(12), index=ix)
- td_err = DataFrame(randn(12, 2), index=ix, columns=["x", "y"])
+ td_err = DataFrame(np.random.randn(12, 2), index=ix, columns=["x", "y"])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
diff --git a/pandas/tests/reshape/concat/test_series.py b/pandas/tests/reshape/concat/test_series.py
index aea2840bb897f..20838a418cfea 100644
--- a/pandas/tests/reshape/concat/test_series.py
+++ b/pandas/tests/reshape/concat/test_series.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
import pandas as pd
@@ -66,8 +65,8 @@ def test_concat_series_axis1(self, sort=sort):
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
- s = Series(randn(5), name="A")
- s2 = Series(randn(5), name="B")
+ s = Series(np.random.randn(5), name="A")
+ s2 = Series(np.random.randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
@@ -78,8 +77,8 @@ def test_concat_series_axis1(self, sort=sort):
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
- s = Series(randn(3), index=["c", "a", "b"], name="A")
- s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
+ s = Series(np.random.randn(3), index=["c", "a", "b"], name="A")
+ s2 = Series(np.random.randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
@@ -103,8 +102,8 @@ def test_concat_series_axis1_names_applied(self):
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
- s1 = Series(randn(len(dates)), index=dates, name="value")
- s2 = Series(randn(len(dates)), index=dates, name="value")
+ s1 = Series(np.random.randn(len(dates)), index=dates, name="value")
+ s2 = Series(np.random.randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index c1efbb8536d68..5968fd1834f8c 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
import pandas as pd
@@ -290,10 +289,10 @@ def test_join_empty_bug(self):
def test_join_unconsolidated(self):
# GH #331
- a = DataFrame(randn(30, 2), columns=["a", "b"])
- c = Series(randn(30))
+ a = DataFrame(np.random.randn(30, 2), columns=["a", "b"])
+ c = Series(np.random.randn(30))
a["c"] = c
- d = DataFrame(randn(30, 1), columns=["q"])
+ d = DataFrame(np.random.randn(30, 1), columns=["q"])
# it works!
a.join(d)
@@ -412,8 +411,8 @@ def test_join_hierarchical_mixed(self):
def test_join_float64_float32(self):
- a = DataFrame(randn(10, 2), columns=["a", "b"], dtype=np.float64)
- b = DataFrame(randn(10, 1), columns=["c"], dtype=np.float32)
+ a = DataFrame(np.random.randn(10, 2), columns=["a", "b"], dtype=np.float64)
+ b = DataFrame(np.random.randn(10, 1), columns=["c"], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes["a"] == "float64"
assert joined.dtypes["b"] == "float64"
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index 68096192c51ea..65673bdde4257 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
import pandas as pd
@@ -406,10 +405,10 @@ def test_left_merge_na_buglet(self):
left = DataFrame(
{
"id": list("abcde"),
- "v1": randn(5),
- "v2": randn(5),
+ "v1": np.random.randn(5),
+ "v2": np.random.randn(5),
"dummy": list("abcde"),
- "v3": randn(5),
+ "v3": np.random.randn(5),
},
columns=["id", "v1", "v2", "dummy", "v3"],
)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 6d4c1594146de..2d862fda013d5 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -2,15 +2,14 @@
import re
import numpy as np
-from numpy.random import randn
import pytest
import pandas._testing as tm
from pandas.core.api import DataFrame, Index, Series
from pandas.core.computation import expressions as expr
-_frame = DataFrame(randn(10000, 4), columns=list("ABCD"), dtype="float64")
-_frame2 = DataFrame(randn(100, 4), columns=list("ABCD"), dtype="float64")
+_frame = DataFrame(np.random.randn(10000, 4), columns=list("ABCD"), dtype="float64")
+_frame2 = DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64")
_mixed = DataFrame(
{
"A": _frame["A"].copy(),
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 9be5abb9dda65..f52d0d0fccab8 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2,7 +2,6 @@
import re
import numpy as np
-from numpy.random import randint
import pytest
from pandas._libs import lib
@@ -367,7 +366,12 @@ def test_iter_single_element(self):
tm.assert_series_equal(ds, s)
def test_iter_object_try_string(self):
- ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(4)])
+ ds = Series(
+ [
+ slice(None, np.random.randint(10), np.random.randint(10, 20))
+ for _ in range(4)
+ ]
+ )
i, s = 100, "h"
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index e5c5579d35a5c..1780925202593 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -1,7 +1,6 @@
from datetime import datetime, timedelta
import numpy as np
-from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
@@ -254,7 +253,7 @@ def consistency_data(request):
def _create_arr():
"""Internal function to mock an array."""
- arr = randn(100)
+ arr = np.random.randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
return arr
@@ -276,7 +275,7 @@ def _create_series():
def _create_frame():
"""Internal function to mock DataFrame."""
rng = _create_rng()
- return DataFrame(randn(100, 10), index=rng, columns=np.arange(10))
+ return DataFrame(np.random.randn(100, 10), index=rng, columns=np.arange(10))
@pytest.fixture
diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py
index 39e6ae71162a9..2fab7f5c91c09 100644
--- a/pandas/tests/window/moments/conftest.py
+++ b/pandas/tests/window/moments/conftest.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
from pandas import Series
@@ -7,8 +6,8 @@
@pytest.fixture
def binary_ew_data():
- A = Series(randn(50), index=np.arange(50))
- B = A[2:] + randn(48)
+ A = Series(np.random.randn(50), index=np.arange(50))
+ B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py
index 089ec697b5b1c..2718bdabee96a 100644
--- a/pandas/tests/window/moments/test_moments_consistency_ewm.py
+++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
from pandas import DataFrame, Series, concat
@@ -63,7 +62,7 @@ def test_different_input_array_raise_exception(name, binary_ew_data):
msg = "Input arrays must be of the same type!"
# exception raised is Exception
with pytest.raises(Exception, match=msg):
- getattr(A.ewm(com=20, min_periods=5), name)(randn(50))
+ getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
@pytest.mark.slow
diff --git a/pandas/tests/window/moments/test_moments_consistency_expanding.py b/pandas/tests/window/moments/test_moments_consistency_expanding.py
index 3ec91dcb60610..eb348fda5782b 100644
--- a/pandas/tests/window/moments/test_moments_consistency_expanding.py
+++ b/pandas/tests/window/moments/test_moments_consistency_expanding.py
@@ -1,7 +1,6 @@
import warnings
import numpy as np
-from numpy.random import randn
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, isna, notna
@@ -34,7 +33,7 @@ def _check_expanding(
def _check_expanding_has_min_periods(func, static_comp, has_min_periods):
- ser = Series(randn(50))
+ ser = Series(np.random.randn(50))
if has_min_periods:
result = func(ser, min_periods=30)
@@ -46,7 +45,7 @@ def _check_expanding_has_min_periods(func, static_comp, has_min_periods):
assert isna(result.iloc[13])
assert notna(result.iloc[14])
- ser2 = Series(randn(20))
+ ser2 = Series(np.random.randn(20))
result = func(ser2, min_periods=5)
assert isna(result[3])
assert notna(result[4])
@@ -62,7 +61,7 @@ def _check_expanding_has_min_periods(func, static_comp, has_min_periods):
def test_expanding_corr(series):
A = series.dropna()
- B = (A + randn(len(A)))[:-5]
+ B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
@@ -88,7 +87,7 @@ def test_expanding_quantile(series):
def test_expanding_cov(series):
A = series
- B = (A + randn(len(A)))[:-5]
+ B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().cov(B)
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index 6ab53c8e2ec0d..b7b05c1a6e30d 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -2,7 +2,6 @@
import warnings
import numpy as np
-from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
@@ -34,7 +33,7 @@ def _rolling_consistency_cases():
# binary moments
def test_rolling_cov(series):
A = series
- B = A + randn(len(A))
+ B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
@@ -42,7 +41,7 @@ def test_rolling_cov(series):
def test_rolling_corr(series):
A = series
- B = A + randn(len(A))
+ B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py
index 605b85344ba76..def6d7289fec2 100644
--- a/pandas/tests/window/moments/test_moments_ewm.py
+++ b/pandas/tests/window/moments/test_moments_ewm.py
@@ -1,5 +1,4 @@
import numpy as np
-from numpy.random import randn
import pytest
from pandas import DataFrame, Series
@@ -305,7 +304,7 @@ def test_ew_empty_series(method):
@pytest.mark.parametrize("name", ["mean", "var", "vol"])
def test_ew_min_periods(min_periods, name):
# excluding NaNs correctly
- arr = randn(50)
+ arr = np.random.randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
| - [x] closes #37053 ~~(keep this issue open until both #37492 and #37117 are completed)~~
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
# update 2020-10-30
This is the replacement for #37103, since I messed up the git timeline in the previous PR (the conflicts were too complex to resolve). The separate CI check is in #37117.
The whole project is reformatted except `pandas/_testing.py`, since there are quite a few `tm.randn` calls in the testing files.
Below is the script used to perform the whole clean-up:
```
import os
import re
class NumpyRandomRelatedScript:
def __init__(self, base_dir: str = "/dir/of/pandas/pandas") -> None:
self.base_dir = base_dir
self.py_files = []
self.backup_files = []
self.p1 = re.compile("import numpy as np")
self.p2 = re.compile("from numpy.random import[ ,a-zA-Z]*[\s]")
self.p3 = re.compile("from numpy import random[\s]")
def search_py_files(self, pandas_file_dir: str) -> None:
if os.path.isfile(pandas_file_dir):
if pandas_file_dir[-3:] == ".py":
if pandas_file_dir != os.path.join(self.base_dir, "_testing.py"):
self.py_files.append(pandas_file_dir)
elif os.path.isdir(pandas_file_dir):
for d in os.listdir(pandas_file_dir):
self.search_py_files(os.path.join(pandas_file_dir, d))
def do_the_clean_up(self, file_dir: str) -> None:
with open(file_dir, "r") as file:
data = file.read()
m1, m2, m3 = (
re.search(self.p1, data),
re.search(self.p2, data),
re.search(self.p3, data),
)
if not (m2 or m3):
# print("There's no need to change, please recheck!")
return
backup_dir = file_dir + ".issue37053_backup"
self.backup_files.append(backup_dir)
print("Backup: " + backup_dir)
with open(backup_dir, "w+") as file:
file.write(data)
with open(file_dir, "w+") as file:
if m2:
if not m1:
data = re.sub(self.p2, "import numpy as np\n", data)
m1 = True
else:
data = re.sub(self.p2, "", data)
methods = (
m2.group(0)
.replace("from numpy.random import", "")
.replace(" ", "")
.replace("\n", "")
.split(",")
)
if isinstance(methods, str):
methods = [methods]
for method in methods:
data = re.sub(
r"[\s]{meth}[^a-z]".format(meth=method),
" np.random.{meth}(".format(meth=method),
data,
)
data = re.sub(
r"[\s]-{meth}[^a-z]".format(meth=method),
" -np.random.{meth}(".format(meth=method),
data,
)
data = re.sub(
r"[\(]{meth}[^a-z]".format(meth=method),
"(np.random.{meth}(".format(meth=method),
data,
)
if m3:
if not m1:
data = re.sub(self.p3, "import numpy as np\n", data)
else:
data = re.sub(self.p3, "", data)
data = re.sub(r"[\s]random.{1}", " np.random.", data)
data = re.sub(r"[\(]random.{1}", "(np.random.", data)
file.write(data)
print("Clean: " + file_dir)
def remove_backup(self) -> None:
for file in self.backup_files:
print("Remove: " + file)
os.remove(file)
if __name__ == "__main__":
script = NumpyRandomRelatedScript()
script.search_py_files(script.base_dir)
for f in script.py_files:
script.do_the_clean_up(f)
script.remove_backup()
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/37492 | 2020-10-29T18:41:55Z | 2020-10-30T13:22:51Z | 2020-10-30T13:22:51Z | 2020-10-30T14:08:07Z |
TST/REF: loc/iloc/at/iat tests go in tests.indexing | diff --git a/pandas/tests/frame/indexing/test_at.py b/pandas/tests/indexing/test_at.py
similarity index 100%
rename from pandas/tests/frame/indexing/test_at.py
rename to pandas/tests/indexing/test_at.py
diff --git a/pandas/tests/frame/indexing/test_iat.py b/pandas/tests/indexing/test_iat.py
similarity index 100%
rename from pandas/tests/frame/indexing/test_iat.py
rename to pandas/tests/indexing/test_iat.py
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 4ef6463fd9e31..867941a97b598 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -776,3 +776,31 @@ def test_iloc_setitem_series_duplicate_columns(self):
)
df.iloc[:, 0] = df.iloc[:, 0].astype(np.float64)
assert df.dtypes.iloc[2] == np.int64
+
+
+class TestILocSeries:
+ def test_iloc(self):
+ ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
+
+ for i in range(len(ser)):
+ result = ser.iloc[i]
+ exp = ser[ser.index[i]]
+ tm.assert_almost_equal(result, exp)
+
+ # pass a slice
+ result = ser.iloc[slice(1, 3)]
+ expected = ser.loc[2:4]
+ tm.assert_series_equal(result, expected)
+
+ # test slice is a view
+ result[:] = 0
+ assert (ser[1:3] == 0).all()
+
+ # list of integers
+ result = ser.iloc[[0, 2, 3, 4, 5]]
+ expected = ser.reindex(ser.index[[0, 2, 3, 4, 5]])
+ tm.assert_series_equal(result, expected)
+
+ def test_iloc_getitem_nonunique(self):
+ ser = Series([0, 1, 2], index=[0, 1, 0])
+ assert ser.iloc[2] == 2
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 8fb418ab78307..7748392a893eb 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1212,3 +1212,153 @@ def test_loc_with_period_index_indexer():
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
tm.assert_frame_equal(df, df.loc[list(idx)])
+
+
+class TestLocSeries:
+ @pytest.mark.parametrize("val,expected", [(2 ** 63 - 1, 3), (2 ** 63, 4)])
+ def test_loc_uint64(self, val, expected):
+ # see GH#19399
+ ser = Series({2 ** 63 - 1: 3, 2 ** 63: 4})
+ assert ser.loc[val] == expected
+
+ def test_loc_getitem(self, string_series, datetime_series):
+ inds = string_series.index[[3, 4, 7]]
+ tm.assert_series_equal(string_series.loc[inds], string_series.reindex(inds))
+ tm.assert_series_equal(string_series.iloc[5::2], string_series[5::2])
+
+ # slice with indices
+ d1, d2 = datetime_series.index[[5, 15]]
+ result = datetime_series.loc[d1:d2]
+ expected = datetime_series.truncate(d1, d2)
+ tm.assert_series_equal(result, expected)
+
+ # boolean
+ mask = string_series > string_series.median()
+ tm.assert_series_equal(string_series.loc[mask], string_series[mask])
+
+ # ask for index value
+ assert datetime_series.loc[d1] == datetime_series[d1]
+ assert datetime_series.loc[d2] == datetime_series[d2]
+
+ def test_loc_getitem_not_monotonic(self, datetime_series):
+ d1, d2 = datetime_series.index[[5, 15]]
+
+ ts2 = datetime_series[::2][[1, 2, 0]]
+
+ msg = r"Timestamp\('2000-01-10 00:00:00'\)"
+ with pytest.raises(KeyError, match=msg):
+ ts2.loc[d1:d2]
+ with pytest.raises(KeyError, match=msg):
+ ts2.loc[d1:d2] = 0
+
+ def test_loc_getitem_setitem_integer_slice_keyerrors(self):
+ ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
+
+ # this is OK
+ cp = ser.copy()
+ cp.iloc[4:10] = 0
+ assert (cp.iloc[4:10] == 0).all()
+
+ # so is this
+ cp = ser.copy()
+ cp.iloc[3:11] = 0
+ assert (cp.iloc[3:11] == 0).values.all()
+
+ result = ser.iloc[2:6]
+ result2 = ser.loc[3:11]
+ expected = ser.reindex([4, 6, 8, 10])
+
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result2, expected)
+
+ # non-monotonic, raise KeyError
+ s2 = ser.iloc[list(range(5)) + list(range(9, 4, -1))]
+ with pytest.raises(KeyError, match=r"^3$"):
+ s2.loc[3:11]
+ with pytest.raises(KeyError, match=r"^3$"):
+ s2.loc[3:11] = 0
+
+ def test_loc_getitem_iterator(self, string_series):
+ idx = iter(string_series.index[:10])
+ result = string_series.loc[idx]
+ tm.assert_series_equal(result, string_series[:10])
+
+ def test_loc_setitem_boolean(self, string_series):
+ mask = string_series > string_series.median()
+
+ result = string_series.copy()
+ result.loc[mask] = 0
+ expected = string_series
+ expected[mask] = 0
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_corner(self, string_series):
+ inds = list(string_series.index[[5, 8, 12]])
+ string_series.loc[inds] = 5
+ msg = r"\['foo'\] not in index"
+ with pytest.raises(KeyError, match=msg):
+ string_series.loc[inds + ["foo"]] = 5
+
+ def test_basic_setitem_with_labels(self, datetime_series):
+ indices = datetime_series.index[[5, 10, 15]]
+
+ cp = datetime_series.copy()
+ exp = datetime_series.copy()
+ cp[indices] = 0
+ exp.loc[indices] = 0
+ tm.assert_series_equal(cp, exp)
+
+ cp = datetime_series.copy()
+ exp = datetime_series.copy()
+ cp[indices[0] : indices[2]] = 0
+ exp.loc[indices[0] : indices[2]] = 0
+ tm.assert_series_equal(cp, exp)
+
+ def test_loc_setitem_listlike_of_ints(self):
+
+ # integer indexes, be careful
+ ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
+ inds = [0, 4, 6]
+ arr_inds = np.array([0, 4, 6])
+
+ cp = ser.copy()
+ exp = ser.copy()
+ ser[inds] = 0
+ ser.loc[inds] = 0
+ tm.assert_series_equal(cp, exp)
+
+ cp = ser.copy()
+ exp = ser.copy()
+ ser[arr_inds] = 0
+ ser.loc[arr_inds] = 0
+ tm.assert_series_equal(cp, exp)
+
+ inds_notfound = [0, 4, 5, 6]
+ arr_inds_notfound = np.array([0, 4, 5, 6])
+ msg = r"\[5\] not in index"
+ with pytest.raises(KeyError, match=msg):
+ ser[inds_notfound] = 0
+ with pytest.raises(Exception, match=msg):
+ ser[arr_inds_notfound] = 0
+
+ def test_loc_setitem_dt64tz_values(self):
+ # GH#12089
+ ser = Series(
+ date_range("2011-01-01", periods=3, tz="US/Eastern"),
+ index=["a", "b", "c"],
+ )
+ s2 = ser.copy()
+ expected = Timestamp("2011-01-03", tz="US/Eastern")
+ s2.loc["a"] = expected
+ result = s2.loc["a"]
+ assert result == expected
+
+ s2 = ser.copy()
+ s2.iloc[0] = expected
+ result = s2.iloc[0]
+ assert result == expected
+
+ s2 = ser.copy()
+ s2["a"] = expected
+ result = s2["a"]
+ assert result == expected
diff --git a/pandas/tests/series/indexing/test_iloc.py b/pandas/tests/series/indexing/test_iloc.py
deleted file mode 100644
index f276eb5b0b23d..0000000000000
--- a/pandas/tests/series/indexing/test_iloc.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import numpy as np
-
-from pandas import Series
-import pandas._testing as tm
-
-
-def test_iloc():
- s = Series(np.random.randn(10), index=list(range(0, 20, 2)))
-
- for i in range(len(s)):
- result = s.iloc[i]
- exp = s[s.index[i]]
- tm.assert_almost_equal(result, exp)
-
- # pass a slice
- result = s.iloc[slice(1, 3)]
- expected = s.loc[2:4]
- tm.assert_series_equal(result, expected)
-
- # test slice is a view
- result[:] = 0
- assert (s[1:3] == 0).all()
-
- # list of integers
- result = s.iloc[[0, 2, 3, 4, 5]]
- expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
- tm.assert_series_equal(result, expected)
-
-
-def test_iloc_nonunique():
- s = Series([0, 1, 2], index=[0, 1, 0])
- assert s.iloc[2] == 2
diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py
deleted file mode 100644
index 368adcfb32215..0000000000000
--- a/pandas/tests/series/indexing/test_loc.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import Series, Timestamp
-import pandas._testing as tm
-
-
-@pytest.mark.parametrize("val,expected", [(2 ** 63 - 1, 3), (2 ** 63, 4)])
-def test_loc_uint64(val, expected):
- # see gh-19399
- s = Series({2 ** 63 - 1: 3, 2 ** 63: 4})
- assert s.loc[val] == expected
-
-
-def test_loc_getitem(string_series, datetime_series):
- inds = string_series.index[[3, 4, 7]]
- tm.assert_series_equal(string_series.loc[inds], string_series.reindex(inds))
- tm.assert_series_equal(string_series.iloc[5::2], string_series[5::2])
-
- # slice with indices
- d1, d2 = datetime_series.index[[5, 15]]
- result = datetime_series.loc[d1:d2]
- expected = datetime_series.truncate(d1, d2)
- tm.assert_series_equal(result, expected)
-
- # boolean
- mask = string_series > string_series.median()
- tm.assert_series_equal(string_series.loc[mask], string_series[mask])
-
- # ask for index value
- assert datetime_series.loc[d1] == datetime_series[d1]
- assert datetime_series.loc[d2] == datetime_series[d2]
-
-
-def test_loc_getitem_not_monotonic(datetime_series):
- d1, d2 = datetime_series.index[[5, 15]]
-
- ts2 = datetime_series[::2][[1, 2, 0]]
-
- msg = r"Timestamp\('2000-01-10 00:00:00'\)"
- with pytest.raises(KeyError, match=msg):
- ts2.loc[d1:d2]
- with pytest.raises(KeyError, match=msg):
- ts2.loc[d1:d2] = 0
-
-
-def test_loc_getitem_setitem_integer_slice_keyerrors():
- s = Series(np.random.randn(10), index=list(range(0, 20, 2)))
-
- # this is OK
- cp = s.copy()
- cp.iloc[4:10] = 0
- assert (cp.iloc[4:10] == 0).all()
-
- # so is this
- cp = s.copy()
- cp.iloc[3:11] = 0
- assert (cp.iloc[3:11] == 0).values.all()
-
- result = s.iloc[2:6]
- result2 = s.loc[3:11]
- expected = s.reindex([4, 6, 8, 10])
-
- tm.assert_series_equal(result, expected)
- tm.assert_series_equal(result2, expected)
-
- # non-monotonic, raise KeyError
- s2 = s.iloc[list(range(5)) + list(range(9, 4, -1))]
- with pytest.raises(KeyError, match=r"^3$"):
- s2.loc[3:11]
- with pytest.raises(KeyError, match=r"^3$"):
- s2.loc[3:11] = 0
-
-
-def test_loc_getitem_iterator(string_series):
- idx = iter(string_series.index[:10])
- result = string_series.loc[idx]
- tm.assert_series_equal(result, string_series[:10])
-
-
-def test_loc_setitem_boolean(string_series):
- mask = string_series > string_series.median()
-
- result = string_series.copy()
- result.loc[mask] = 0
- expected = string_series
- expected[mask] = 0
- tm.assert_series_equal(result, expected)
-
-
-def test_loc_setitem_corner(string_series):
- inds = list(string_series.index[[5, 8, 12]])
- string_series.loc[inds] = 5
- msg = r"\['foo'\] not in index"
- with pytest.raises(KeyError, match=msg):
- string_series.loc[inds + ["foo"]] = 5
-
-
-def test_basic_setitem_with_labels(datetime_series):
- indices = datetime_series.index[[5, 10, 15]]
-
- cp = datetime_series.copy()
- exp = datetime_series.copy()
- cp[indices] = 0
- exp.loc[indices] = 0
- tm.assert_series_equal(cp, exp)
-
- cp = datetime_series.copy()
- exp = datetime_series.copy()
- cp[indices[0] : indices[2]] = 0
- exp.loc[indices[0] : indices[2]] = 0
- tm.assert_series_equal(cp, exp)
-
- # integer indexes, be careful
- s = Series(np.random.randn(10), index=list(range(0, 20, 2)))
- inds = [0, 4, 6]
- arr_inds = np.array([0, 4, 6])
-
- cp = s.copy()
- exp = s.copy()
- s[inds] = 0
- s.loc[inds] = 0
- tm.assert_series_equal(cp, exp)
-
- cp = s.copy()
- exp = s.copy()
- s[arr_inds] = 0
- s.loc[arr_inds] = 0
- tm.assert_series_equal(cp, exp)
-
- inds_notfound = [0, 4, 5, 6]
- arr_inds_notfound = np.array([0, 4, 5, 6])
- msg = r"\[5\] not in index"
- with pytest.raises(KeyError, match=msg):
- s[inds_notfound] = 0
- with pytest.raises(Exception, match=msg):
- s[arr_inds_notfound] = 0
-
- # GH12089
- # with tz for values
- s = Series(
- pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]
- )
- s2 = s.copy()
- expected = Timestamp("2011-01-03", tz="US/Eastern")
- s2.loc["a"] = expected
- result = s2.loc["a"]
- assert result == expected
-
- s2 = s.copy()
- s2.iloc[0] = expected
- result = s2.iloc[0]
- assert result == expected
-
- s2 = s.copy()
- s2["a"] = expected
- result = s2["a"]
- assert result == expected
| not tests.(series|frame).indexing
`(Series|DataFrame).__(getitem|setitem)__` tests go in the tests.(series|frame).indexing
| https://api.github.com/repos/pandas-dev/pandas/pulls/37487 | 2020-10-29T15:54:34Z | 2020-10-30T13:28:08Z | 2020-10-30T13:28:08Z | 2020-10-30T14:44:14Z |
REF: separate out cases in setitem_with_indexer | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index dad9ba446941f..e376f930c8c63 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1673,66 +1673,14 @@ def _setitem_with_indexer(self, indexer, value):
# we have an equal len Frame
if isinstance(value, ABCDataFrame):
- sub_indexer = list(indexer)
- multiindex_indexer = isinstance(labels, ABCMultiIndex)
- # TODO: we are implicitly assuming value.columns is unique
- unique_cols = value.columns.is_unique
-
- if not unique_cols and value.columns.equals(self.obj.columns):
- # We assume we are already aligned, see
- # test_iloc_setitem_frame_duplicate_columns_multiple_blocks
- for loc in ilocs:
- item = item_labels[loc]
- if item in value:
- sub_indexer[info_axis] = item
- v = self._align_series(
- tuple(sub_indexer),
- value.iloc[:, loc],
- multiindex_indexer,
- )
- else:
- v = np.nan
-
- self._setitem_single_column(loc, v, plane_indexer)
-
- elif not unique_cols:
- raise ValueError(
- "Setting with non-unique columns is not allowed."
- )
-
- else:
- for loc in ilocs:
- item = item_labels[loc]
- if item in value:
- sub_indexer[info_axis] = item
- v = self._align_series(
- tuple(sub_indexer), value[item], multiindex_indexer
- )
- else:
- v = np.nan
-
- self._setitem_single_column(loc, v, plane_indexer)
+ self._setitem_with_indexer_frame_value(indexer, value)
# we have an equal len ndarray/convertible to our labels
# hasattr first, to avoid coercing to ndarray without reason.
# But we may be relying on the ndarray coercion to check ndim.
# Why not just convert to an ndarray earlier on if needed?
elif np.ndim(value) == 2:
-
- # note that this coerces the dtype if we are mixed
- # GH 7551
- value = np.array(value, dtype=object)
- if len(ilocs) != value.shape[1]:
- raise ValueError(
- "Must have equal len keys and value "
- "when setting with an ndarray"
- )
-
- for i, loc in enumerate(ilocs):
- # setting with a list, re-coerces
- self._setitem_single_column(
- loc, value[:, i].tolist(), plane_indexer
- )
+ self._setitem_with_indexer_2d_value(indexer, value)
elif (
len(labels) == 1
@@ -1766,6 +1714,67 @@ def _setitem_with_indexer(self, indexer, value):
else:
self._setitem_single_block(indexer, value)
+ def _setitem_with_indexer_2d_value(self, indexer, value):
+ # We get here with np.ndim(value) == 2, excluding DataFrame,
+ # which goes through _setitem_with_indexer_frame_value
+ plane_indexer = indexer[:1]
+
+ ilocs = self._ensure_iterable_column_indexer(indexer[1])
+
+ # GH#7551 Note that this coerces the dtype if we are mixed
+ value = np.array(value, dtype=object)
+ if len(ilocs) != value.shape[1]:
+ raise ValueError(
+ "Must have equal len keys and value when setting with an ndarray"
+ )
+
+ for i, loc in enumerate(ilocs):
+ # setting with a list, re-coerces
+ self._setitem_single_column(loc, value[:, i].tolist(), plane_indexer)
+
+ def _setitem_with_indexer_frame_value(self, indexer, value: "DataFrame"):
+ ilocs = self._ensure_iterable_column_indexer(indexer[1])
+
+ sub_indexer = list(indexer)
+ plane_indexer = indexer[:1]
+
+ multiindex_indexer = isinstance(self.obj.columns, ABCMultiIndex)
+
+ unique_cols = value.columns.is_unique
+
+ if not unique_cols and value.columns.equals(self.obj.columns):
+ # We assume we are already aligned, see
+ # test_iloc_setitem_frame_duplicate_columns_multiple_blocks
+ for loc in ilocs:
+ item = self.obj.columns[loc]
+ if item in value:
+ sub_indexer[1] = item
+ val = self._align_series(
+ tuple(sub_indexer),
+ value.iloc[:, loc],
+ multiindex_indexer,
+ )
+ else:
+ val = np.nan
+
+ self._setitem_single_column(loc, val, plane_indexer)
+
+ elif not unique_cols:
+ raise ValueError("Setting with non-unique columns is not allowed.")
+
+ else:
+ for loc in ilocs:
+ item = self.obj.columns[loc]
+ if item in value:
+ sub_indexer[1] = item
+ val = self._align_series(
+ tuple(sub_indexer), value[item], multiindex_indexer
+ )
+ else:
+ val = np.nan
+
+ self._setitem_single_column(loc, val, plane_indexer)
+
def _setitem_single_column(self, loc: int, value, plane_indexer):
# positional setting on column loc
pi = plane_indexer
| Preparatory to fixing a multiple-paths issue in setitem_with_indexer | https://api.github.com/repos/pandas-dev/pandas/pulls/37481 | 2020-10-29T02:50:57Z | 2020-10-30T16:06:06Z | 2020-10-30T16:06:06Z | 2020-10-30T17:10:45Z |
ENH: Fix output of assert_frame_equal if indexes differ and check_like=True | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6f9e8d6a98d80..6f137302d4994 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -228,6 +228,7 @@ Other enhancements
- :class:`Rolling` now supports the ``closed`` argument for fixed windows (:issue:`34315`)
- :class:`DatetimeIndex` and :class:`Series` with ``datetime64`` or ``datetime64tz`` dtypes now support ``std`` (:issue:`37436`)
- :class:`Window` now supports all Scipy window types in ``win_type`` with flexible keyword argument support (:issue:`34556`)
+- :meth:`testing.assert_index_equal` now has a ``check_order`` parameter that allows indexes to be checked in an order-insensitive manner (:issue:`37478`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/_testing.py b/pandas/_testing.py
index a4fdb390abf42..427585704ba58 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -667,6 +667,7 @@ def assert_index_equal(
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
+ check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
@@ -696,6 +697,12 @@ def assert_index_equal(
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
+ check_order : bool, default True
+ Whether to compare the order of index entries as well as their values.
+ If True, both indexes must contain the same elements, in the same order.
+ If False, both indexes must contain the same elements, but in any order.
+
+ .. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
@@ -762,6 +769,11 @@ def _get_ilevel_values(index, level):
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
+ # If order doesn't matter then sort the index entries
+ if not check_order:
+ left = left.sort_values()
+ right = right.sort_values()
+
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
@@ -1582,9 +1594,6 @@ def assert_frame_equal(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
- if check_like:
- left, right = left.reindex_like(right), right
-
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
@@ -1596,6 +1605,7 @@ def assert_frame_equal(
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
+ check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
@@ -1609,11 +1619,15 @@ def assert_frame_equal(
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
+ check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
+ if check_like:
+ left, right = left.reindex_like(right), right
+
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index 6111797d70268..d5161ce37494b 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -145,7 +145,8 @@ def test_empty_dtypes(check_dtype):
tm.assert_frame_equal(df1, df2, **kwargs)
-def test_frame_equal_index_mismatch(obj_fixture):
+@pytest.mark.parametrize("check_like", [True, False])
+def test_frame_equal_index_mismatch(check_like, obj_fixture):
msg = f"""{obj_fixture}\\.index are different
{obj_fixture}\\.index values are different \\(33\\.33333 %\\)
@@ -156,10 +157,11 @@ def test_frame_equal_index_mismatch(obj_fixture):
df2 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "d"])
with pytest.raises(AssertionError, match=msg):
- tm.assert_frame_equal(df1, df2, obj=obj_fixture)
+ tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)
-def test_frame_equal_columns_mismatch(obj_fixture):
+@pytest.mark.parametrize("check_like", [True, False])
+def test_frame_equal_columns_mismatch(check_like, obj_fixture):
msg = f"""{obj_fixture}\\.columns are different
{obj_fixture}\\.columns values are different \\(50\\.0 %\\)
@@ -170,7 +172,7 @@ def test_frame_equal_columns_mismatch(obj_fixture):
df2 = DataFrame({"A": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])
with pytest.raises(AssertionError, match=msg):
- tm.assert_frame_equal(df1, df2, obj=obj_fixture)
+ tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)
def test_frame_equal_block_mismatch(by_blocks_fixture, obj_fixture):
diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py
index 125af6ef78593..21d5a456e20d0 100644
--- a/pandas/tests/util/test_assert_index_equal.py
+++ b/pandas/tests/util/test_assert_index_equal.py
@@ -115,6 +115,28 @@ def test_index_equal_values_too_far(check_exact, rtol):
tm.assert_index_equal(idx1, idx2, **kwargs)
+@pytest.mark.parametrize("check_order", [True, False])
+def test_index_equal_value_oder_mismatch(check_exact, rtol, check_order):
+ idx1 = Index([1, 2, 3])
+ idx2 = Index([3, 2, 1])
+
+ msg = """Index are different
+
+Index values are different \\(66\\.66667 %\\)
+\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
+\\[right\\]: Int64Index\\(\\[3, 2, 1\\], dtype='int64'\\)"""
+
+ if check_order:
+ with pytest.raises(AssertionError, match=msg):
+ tm.assert_index_equal(
+ idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=True
+ )
+ else:
+ tm.assert_index_equal(
+ idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=False
+ )
+
+
def test_index_equal_level_values_mismatch(check_exact, rtol):
idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
| - [x] closes #37478
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
As described in #37478 This PR fixes some very misleading output when ```assert_frame_equal``` is called with differing index values and ```check_like=True```
I have added a ```check_order``` parameter to the ```assert_index_equal``` function. This does essentially the same thing as the ```check_like``` parmeter on ```assert_frame_equal``` (but for indexes). I think ```check_order``` gives a much clearer idea of what the parameter does, but it is a break from the previous naming. I am unsure whether I should have stuck with ```check_like``` for the new parameter. | https://api.github.com/repos/pandas-dev/pandas/pulls/37479 | 2020-10-29T01:04:40Z | 2020-11-02T13:43:42Z | 2020-11-02T13:43:42Z | 2020-11-02T13:43:46Z |
REF: helper to ensure column indexer is iterable | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3d491c7127e38..dad9ba446941f 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1648,10 +1648,7 @@ def _setitem_with_indexer(self, indexer, value):
labels = item_labels[info_idx]
# Ensure we have something we can iterate over
- ilocs = info_idx
- if isinstance(info_idx, slice):
- ri = Index(range(len(self.obj.columns)))
- ilocs = ri[info_idx]
+ ilocs = self._ensure_iterable_column_indexer(indexer[1])
plane_indexer = indexer[:1]
lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
@@ -1900,6 +1897,20 @@ def _setitem_with_indexer_missing(self, indexer, value):
self.obj._mgr = self.obj.append(value)._mgr
self.obj._maybe_update_cacher(clear=True)
+ def _ensure_iterable_column_indexer(self, column_indexer):
+ """
+ Ensure that our column indexer is something that can be iterated over.
+ """
+ # Ensure we have something we can iterate over
+ if is_integer(column_indexer):
+ ilocs = [column_indexer]
+ elif isinstance(column_indexer, slice):
+ ri = Index(range(len(self.obj.columns)))
+ ilocs = ri[column_indexer]
+ else:
+ ilocs = column_indexer
+ return ilocs
+
def _align_series(self, indexer, ser: "Series", multiindex_indexer: bool = False):
"""
Parameters
| Helper method separated out from upcoming PR to split _setitem_with_indexer into more manageable pieces, which in turn is part of fixing a goes-through-two-separate-paths problem. | https://api.github.com/repos/pandas-dev/pandas/pulls/37475 | 2020-10-28T18:36:05Z | 2020-10-29T01:05:14Z | 2020-10-29T01:05:14Z | 2020-10-29T02:01:27Z |
CLN: Add init files in test folders | diff --git a/pandas/tests/frame/indexing/__init__.py b/pandas/tests/frame/indexing/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/window/moments/__init__.py b/pandas/tests/window/moments/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
| was not sure why they were missing there, so I added them
| https://api.github.com/repos/pandas-dev/pandas/pulls/37474 | 2020-10-28T18:22:07Z | 2020-10-28T21:55:53Z | 2020-10-28T21:55:52Z | 2020-10-28T21:56:29Z |
CI: 32 bit maybe_indices_to_slice | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 2cb4df7e054fe..e493e5e9d41d3 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -36,6 +36,7 @@ from numpy cimport (
float32_t,
float64_t,
int64_t,
+ intp_t,
ndarray,
uint8_t,
uint64_t,
@@ -490,7 +491,7 @@ def has_infs_f8(const float64_t[:] arr) -> bool:
return False
-def maybe_indices_to_slice(ndarray[int64_t] indices, int max_len):
+def maybe_indices_to_slice(ndarray[intp_t] indices, int max_len):
cdef:
Py_ssize_t i, n = len(indices)
int k, vstart, vlast, v
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 12cbee64c5e24..8f32b859fed2d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2921,7 +2921,9 @@ def __getitem__(self, key):
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
- indexer = lib.maybe_indices_to_slice(indexer, len(self))
+ indexer = lib.maybe_indices_to_slice(
+ indexer.astype(np.intp, copy=False), len(self)
+ )
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f23363f3a3efa..50887ef63bcab 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5191,7 +5191,9 @@ def get_slice_bound(self, label, side: str_t, kind) -> int:
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
else:
- slc = lib.maybe_indices_to_slice(slc.astype("i8"), len(self))
+ slc = lib.maybe_indices_to_slice(
+ slc.astype(np.intp, copy=False), len(self)
+ )
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 863880e222b5d..c3f1f084d76fa 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -14,7 +14,6 @@
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
- ensure_int64,
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
@@ -187,7 +186,7 @@ def __contains__(self, key: Any) -> bool:
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
- indices = ensure_int64(indices)
+ indices = np.asarray(indices, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
@@ -586,7 +585,9 @@ def delete(self, loc):
freq = self.freq
else:
if is_list_like(loc):
- loc = lib.maybe_indices_to_slice(ensure_int64(np.array(loc)), len(self))
+ loc = lib.maybe_indices_to_slice(
+ np.asarray(loc, dtype=np.intp), len(self)
+ )
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index cbc0617ae96d3..bdd3afe747d1d 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2769,7 +2769,7 @@ def get_loc(self, key, method=None):
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
- if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
+ if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
@@ -2816,7 +2816,7 @@ def _maybe_to_slice(loc):
stacklevel=10,
)
- loc = np.arange(start, stop, dtype="int64")
+ loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 8b67788b1a1a1..49ca8f9ad55e9 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -233,8 +233,8 @@ def _rebuild_blknos_and_blklocs(self) -> None:
"""
Update mgr._blknos / mgr._blklocs.
"""
- new_blknos = np.empty(self.shape[0], dtype=np.int64)
- new_blklocs = np.empty(self.shape[0], dtype=np.int64)
+ new_blknos = np.empty(self.shape[0], dtype=np.intp)
+ new_blklocs = np.empty(self.shape[0], dtype=np.intp)
new_blknos.fill(-1)
new_blklocs.fill(-1)
diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py
index c9c34916be32b..da3e18c8d9634 100644
--- a/pandas/tests/libs/test_lib.py
+++ b/pandas/tests/libs/test_lib.py
@@ -50,7 +50,7 @@ def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
- indices = np.array([], dtype=np.int64)
+ indices = np.array([], dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -58,7 +58,7 @@ def test_maybe_indices_to_slice_left_edge(self):
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
- indices = np.arange(0, end, step, dtype=np.int64)
+ indices = np.arange(0, end, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -73,7 +73,7 @@ def test_maybe_indices_to_slice_left_edge(self):
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -86,7 +86,7 @@ def test_maybe_indices_to_slice_right_edge(self):
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
- indices = np.arange(start, 99, step, dtype=np.int64)
+ indices = np.arange(start, 99, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -100,7 +100,7 @@ def test_maybe_indices_to_slice_right_edge(self):
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
- indices = np.array([97, 98, 99, 100], dtype=np.int64)
+ indices = np.array([97, 98, 99, 100], dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -113,7 +113,7 @@ def test_maybe_indices_to_slice_right_edge(self):
with pytest.raises(IndexError, match=msg):
target[maybe_slice]
- indices = np.array([100, 99, 98, 97], dtype=np.int64)
+ indices = np.array([100, 99, 98, 97], dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -125,7 +125,7 @@ def test_maybe_indices_to_slice_right_edge(self):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
@@ -137,7 +137,7 @@ def test_maybe_indices_to_slice_both_edges(self):
# slice
for step in [1, 2, 4, 5, 8, 9]:
- indices = np.arange(0, 9, step, dtype=np.int64)
+ indices = np.arange(0, 9, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
@@ -150,7 +150,7 @@ def test_maybe_indices_to_slice_both_edges(self):
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
@@ -162,7 +162,7 @@ def test_maybe_indices_to_slice_middle(self):
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
- indices = np.arange(start, end, step, dtype=np.int64)
+ indices = np.arange(start, end, step, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
@@ -177,7 +177,7 @@ def test_maybe_indices_to_slice_middle(self):
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
- indices = np.array(case, dtype=np.int64)
+ indices = np.array(case, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37473 | 2020-10-28T17:21:27Z | 2020-10-29T21:46:29Z | 2020-10-29T21:46:29Z | 2020-10-29T22:03:06Z |
TST: collect tests by method from test_api | diff --git a/pandas/tests/series/methods/test_copy.py b/pandas/tests/series/methods/test_copy.py
new file mode 100644
index 0000000000000..6201c0f5f7c29
--- /dev/null
+++ b/pandas/tests/series/methods/test_copy.py
@@ -0,0 +1,71 @@
+import numpy as np
+import pytest
+
+from pandas import Series, Timestamp
+import pandas._testing as tm
+
+
+class TestCopy:
+ @pytest.mark.parametrize("deep", [None, False, True])
+ def test_copy(self, deep):
+
+ ser = Series(np.arange(10), dtype="float64")
+
+ # default deep is True
+ if deep is None:
+ ser2 = ser.copy()
+ else:
+ ser2 = ser.copy(deep=deep)
+
+ ser2[::2] = np.NaN
+
+ if deep is None or deep is True:
+ # Did not modify original Series
+ assert np.isnan(ser2[0])
+ assert not np.isnan(ser[0])
+ else:
+ # we DID modify the original Series
+ assert np.isnan(ser2[0])
+ assert np.isnan(ser[0])
+
+ @pytest.mark.parametrize("deep", [None, False, True])
+ def test_copy_tzaware(self, deep):
+ # GH#11794
+ # copy of tz-aware
+ expected = Series([Timestamp("2012/01/01", tz="UTC")])
+ expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
+
+ ser = Series([Timestamp("2012/01/01", tz="UTC")])
+
+ if deep is None:
+ ser2 = ser.copy()
+ else:
+ ser2 = ser.copy(deep=deep)
+
+ ser2[0] = Timestamp("1999/01/01", tz="UTC")
+
+ # default deep is True
+ if deep is None or deep is True:
+ # Did not modify original Series
+ tm.assert_series_equal(ser2, expected2)
+ tm.assert_series_equal(ser, expected)
+ else:
+ # we DID modify the original Series
+ tm.assert_series_equal(ser2, expected2)
+ tm.assert_series_equal(ser, expected2)
+
+ def test_copy_name(self, datetime_series):
+ result = datetime_series.copy()
+ assert result.name == datetime_series.name
+
+ def test_copy_index_name_checking(self, datetime_series):
+ # don't want to be able to modify the index stored elsewhere after
+ # making a copy
+
+ datetime_series.index.name = None
+ assert datetime_series.index.name is None
+ assert datetime_series is datetime_series
+
+ cp = datetime_series.copy()
+ cp.index.name = "foo"
+ assert datetime_series.index.name is None
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index c80a2f7cba9ad..c948af41c5e8f 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -1,4 +1,3 @@
-from collections import OrderedDict
import pydoc
import warnings
@@ -25,67 +24,12 @@
import pandas._testing as tm
from pandas.core.arrays import PeriodArray
-import pandas.io.formats.printing as printing
-
class TestSeriesMisc:
- def test_scalarop_preserve_name(self, datetime_series):
- result = datetime_series * 2
- assert result.name == datetime_series.name
-
- def test_copy_name(self, datetime_series):
- result = datetime_series.copy()
- assert result.name == datetime_series.name
-
- def test_copy_index_name_checking(self, datetime_series):
- # don't want to be able to modify the index stored elsewhere after
- # making a copy
-
- datetime_series.index.name = None
- assert datetime_series.index.name is None
- assert datetime_series is datetime_series
-
- cp = datetime_series.copy()
- cp.index.name = "foo"
- printing.pprint_thing(datetime_series.index.name)
- assert datetime_series.index.name is None
-
def test_append_preserve_name(self, datetime_series):
result = datetime_series[:5].append(datetime_series[5:])
assert result.name == datetime_series.name
- def test_binop_maybe_preserve_name(self, datetime_series):
- # names match, preserve
- result = datetime_series * datetime_series
- assert result.name == datetime_series.name
- result = datetime_series.mul(datetime_series)
- assert result.name == datetime_series.name
-
- result = datetime_series * datetime_series[:-2]
- assert result.name == datetime_series.name
-
- # names don't match, don't preserve
- cp = datetime_series.copy()
- cp.name = "something else"
- result = datetime_series + cp
- assert result.name is None
- result = datetime_series.add(cp)
- assert result.name is None
-
- ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
- ops = ops + ["r" + op for op in ops]
- for op in ops:
- # names match, preserve
- s = datetime_series.copy()
- result = getattr(s, op)(s)
- assert result.name == datetime_series.name
-
- # names don't match, don't preserve
- cp = datetime_series.copy()
- cp.name = "changed"
- result = getattr(s, op)(cp)
- assert result.name is None
-
def test_getitem_preserve_name(self, datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
@@ -111,73 +55,6 @@ def _pickle_roundtrip(self, obj):
unpickled = pd.read_pickle(path)
return unpickled
- def test_constructor_dict(self):
- d = {"a": 0.0, "b": 1.0, "c": 2.0}
- result = Series(d)
- expected = Series(d, index=sorted(d.keys()))
- tm.assert_series_equal(result, expected)
-
- result = Series(d, index=["b", "c", "d", "a"])
- expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
- tm.assert_series_equal(result, expected)
-
- def test_constructor_subclass_dict(self, dict_subclass):
- data = dict_subclass((x, 10.0 * x) for x in range(10))
- series = Series(data)
- expected = Series(dict(data.items()))
- tm.assert_series_equal(series, expected)
-
- def test_constructor_ordereddict(self):
- # GH3283
- data = OrderedDict((f"col{i}", np.random.random()) for i in range(12))
-
- series = Series(data)
- expected = Series(list(data.values()), list(data.keys()))
- tm.assert_series_equal(series, expected)
-
- # Test with subclass
- class A(OrderedDict):
- pass
-
- series = Series(A(data))
- tm.assert_series_equal(series, expected)
-
- def test_constructor_dict_multiindex(self):
- d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
- _d = sorted(d.items())
- result = Series(d)
- expected = Series(
- [x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
- )
- tm.assert_series_equal(result, expected)
-
- d["z"] = 111.0
- _d.insert(0, ("z", d["z"]))
- result = Series(d)
- expected = Series(
- [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)
- )
- result = result.reindex(index=expected.index)
- tm.assert_series_equal(result, expected)
-
- def test_constructor_dict_timedelta_index(self):
- # GH #12169 : Resample category data with timedelta index
- # construct Series from dict as data and TimedeltaIndex as index
- # will result NaN in result Series data
- expected = Series(
- data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
- )
-
- result = Series(
- data={
- pd.to_timedelta(0, unit="s"): "A",
- pd.to_timedelta(10, unit="s"): "B",
- pd.to_timedelta(20, unit="s"): "C",
- },
- index=pd.to_timedelta([0, 10, 20], unit="s"),
- )
- tm.assert_series_equal(result, expected)
-
def test_sparse_accessor_updates_on_inplace(self):
s = Series([1, 1, 2, 3], dtype="Sparse[int]")
return_value = s.drop([0, 1], inplace=True)
@@ -324,55 +201,6 @@ def test_raise_on_info(self):
with pytest.raises(AttributeError, match=msg):
s.info()
- def test_copy(self):
-
- for deep in [None, False, True]:
- s = Series(np.arange(10), dtype="float64")
-
- # default deep is True
- if deep is None:
- s2 = s.copy()
- else:
- s2 = s.copy(deep=deep)
-
- s2[::2] = np.NaN
-
- if deep is None or deep is True:
- # Did not modify original Series
- assert np.isnan(s2[0])
- assert not np.isnan(s[0])
- else:
- # we DID modify the original Series
- assert np.isnan(s2[0])
- assert np.isnan(s[0])
-
- def test_copy_tzaware(self):
- # GH#11794
- # copy of tz-aware
- expected = Series([Timestamp("2012/01/01", tz="UTC")])
- expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
-
- for deep in [None, False, True]:
-
- s = Series([Timestamp("2012/01/01", tz="UTC")])
-
- if deep is None:
- s2 = s.copy()
- else:
- s2 = s.copy(deep=deep)
-
- s2[0] = pd.Timestamp("1999/01/01", tz="UTC")
-
- # default deep is True
- if deep is None or deep is True:
- # Did not modify original Series
- tm.assert_series_equal(s2, expected2)
- tm.assert_series_equal(s, expected)
- else:
- # we DID modify the original Series
- tm.assert_series_equal(s2, expected2)
- tm.assert_series_equal(s, expected2)
-
def test_axis_alias(self):
s = Series([1, 2, np.nan])
tm.assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index"))
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 00e6fb01da424..ae6a4df8ba699 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -741,3 +741,41 @@ def test_series_ops_name_retention(flex, box, names, all_binary_operators):
assert result.name == names[2]
else:
assert result.name == names[0]
+
+
+class TestNamePreservation:
+ def test_binop_maybe_preserve_name(self, datetime_series):
+ # names match, preserve
+ result = datetime_series * datetime_series
+ assert result.name == datetime_series.name
+ result = datetime_series.mul(datetime_series)
+ assert result.name == datetime_series.name
+
+ result = datetime_series * datetime_series[:-2]
+ assert result.name == datetime_series.name
+
+ # names don't match, don't preserve
+ cp = datetime_series.copy()
+ cp.name = "something else"
+ result = datetime_series + cp
+ assert result.name is None
+ result = datetime_series.add(cp)
+ assert result.name is None
+
+ ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
+ ops = ops + ["r" + op for op in ops]
+ for op in ops:
+ # names match, preserve
+ ser = datetime_series.copy()
+ result = getattr(ser, op)(ser)
+ assert result.name == datetime_series.name
+
+ # names don't match, don't preserve
+ cp = datetime_series.copy()
+ cp.name = "changed"
+ result = getattr(ser, op)(cp)
+ assert result.name is None
+
+ def test_scalarop_preserve_name(self, datetime_series):
+ result = datetime_series * 2
+ assert result.name == datetime_series.name
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index adbdf1077113e..a2fab5c7e0f0e 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1062,6 +1062,11 @@ def test_constructor_periodindex(self):
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
+
+ result = Series(d)
+ expected = Series(d, index=sorted(d.keys()))
+ tm.assert_series_equal(result, expected)
+
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
@@ -1526,6 +1531,63 @@ def test_constructor_list_of_periods_infers_period_dtype(self):
)
assert series.dtype == "Period[D]"
+ def test_constructor_subclass_dict(self, dict_subclass):
+ data = dict_subclass((x, 10.0 * x) for x in range(10))
+ series = Series(data)
+ expected = Series(dict(data.items()))
+ tm.assert_series_equal(series, expected)
+
+ def test_constructor_ordereddict(self):
+ # GH3283
+ data = OrderedDict((f"col{i}", np.random.random()) for i in range(12))
+
+ series = Series(data)
+ expected = Series(list(data.values()), list(data.keys()))
+ tm.assert_series_equal(series, expected)
+
+ # Test with subclass
+ class A(OrderedDict):
+ pass
+
+ series = Series(A(data))
+ tm.assert_series_equal(series, expected)
+
+ def test_constructor_dict_multiindex(self):
+ d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
+ _d = sorted(d.items())
+ result = Series(d)
+ expected = Series(
+ [x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
+ )
+ tm.assert_series_equal(result, expected)
+
+ d["z"] = 111.0
+ _d.insert(0, ("z", d["z"]))
+ result = Series(d)
+ expected = Series(
+ [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)
+ )
+ result = result.reindex(index=expected.index)
+ tm.assert_series_equal(result, expected)
+
+ def test_constructor_dict_timedelta_index(self):
+ # GH #12169 : Resample category data with timedelta index
+ # construct Series from dict as data and TimedeltaIndex as index
+ # will result NaN in result Series data
+ expected = Series(
+ data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
+ )
+
+ result = Series(
+ data={
+ pd.to_timedelta(0, unit="s"): "A",
+ pd.to_timedelta(10, unit="s"): "B",
+ pd.to_timedelta(20, unit="s"): "C",
+ },
+ index=pd.to_timedelta([0, 10, 20], unit="s"),
+ )
+ tm.assert_series_equal(result, expected)
+
class TestSeriesConstructorIndexCoercion:
def test_series_constructor_datetimelike_index_coercion(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37472 | 2020-10-28T17:11:17Z | 2020-10-29T01:04:18Z | 2020-10-29T01:04:18Z | 2020-10-29T01:59:23Z |
Backport PR #35875 on branch 1.1.x (COMPAT: Ensure rolling indexers return intp during take operations) | diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 7cbe34cdebf9f..7c76a8e2a0b22 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -7,6 +7,8 @@
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
+from pandas.core.dtypes.common import ensure_platform_int
+
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
@@ -296,9 +298,9 @@ def get_window_bounds(
start_arrays = []
end_arrays = []
window_indicies_start = 0
- for key, indicies in self.groupby_indicies.items():
+ for key, indices in self.groupby_indicies.items():
if self.index_array is not None:
- index_array = self.index_array.take(indicies)
+ index_array = self.index_array.take(ensure_platform_int(indices))
else:
index_array = self.index_array
indexer = self.rolling_indexer(
@@ -307,22 +309,22 @@ def get_window_bounds(
**self.indexer_kwargs,
)
start, end = indexer.get_window_bounds(
- len(indicies), min_periods, center, closed
+ len(indices), min_periods, center, closed
)
start = start.astype(np.int64)
end = end.astype(np.int64)
# Cannot use groupby_indicies as they might not be monotonic with the object
# we're rolling over
window_indicies = np.arange(
- window_indicies_start, window_indicies_start + len(indicies),
+ window_indicies_start, window_indicies_start + len(indices),
)
- window_indicies_start += len(indicies)
+ window_indicies_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indicies = np.append(
window_indicies, [window_indicies[-1] + 1]
).astype(np.int64)
- start_arrays.append(window_indicies.take(start))
- end_arrays.append(window_indicies.take(end))
+ start_arrays.append(window_indicies.take(ensure_platform_int(start)))
+ end_arrays.append(window_indicies.take(ensure_platform_int(end)))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
# GH 35552: Need to adjust start and end based on the nans appended to values
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index a7232dd5f8a1e..6ca965630248f 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -6,7 +6,7 @@
from pandas.util._test_decorators import async_mark
import pandas as pd
-from pandas import DataFrame, Series, Timestamp, compat
+from pandas import DataFrame, Series, Timestamp
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
@@ -317,7 +317,6 @@ def test_resample_groupby_with_label():
tm.assert_frame_equal(result, expected)
-@pytest.mark.xfail(not compat.IS64, reason="GH-35148")
def test_consistency_with_window():
# consistent return values with window
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 28e27791cad35..2c3d8b4608806 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -6,7 +6,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import DataFrame, Index, Series, Timestamp, compat, concat
+from pandas import DataFrame, Index, Series, Timestamp, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
@@ -277,7 +277,7 @@ def test_preserve_metadata():
@pytest.mark.parametrize(
"func,window_size,expected_vals",
[
- pytest.param(
+ (
"rolling",
2,
[
@@ -289,7 +289,6 @@ def test_preserve_metadata():
[35.0, 40.0, 60.0, 40.0],
[60.0, 80.0, 85.0, 80],
],
- marks=pytest.mark.xfail(not compat.IS64, reason="GH-35294"),
),
(
"expanding",
diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py
index 2aaf6af103e98..bc38634da8941 100644
--- a/pandas/tests/window/test_apply.py
+++ b/pandas/tests/window/test_apply.py
@@ -4,7 +4,7 @@
from pandas.errors import NumbaUtilError
import pandas.util._test_decorators as td
-from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range
+from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range
import pandas._testing as tm
@@ -142,7 +142,6 @@ def test_invalid_kwargs_nopython():
@pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]])
-@pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_rolling_apply_args_kwargs(args_kwargs):
# GH 33433
def foo(x, par):
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 074cdb307f7ee..493a844ca7a44 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series, compat
+from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.groupby.groupby import get_groupby
@@ -23,7 +23,6 @@ def test_mutated(self):
g = get_groupby(self.frame, by="A", mutated=True)
assert g.mutated
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_getitem(self):
g = self.frame.groupby("A")
g_mutated = get_groupby(self.frame, by="A", mutated=True)
@@ -56,7 +55,6 @@ def test_getitem_multiple(self):
result = r.B.count()
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_rolling(self):
g = self.frame.groupby("A")
r = g.rolling(window=4)
@@ -74,7 +72,6 @@ def test_rolling(self):
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_rolling_quantile(self, interpolation):
g = self.frame.groupby("A")
r = g.rolling(window=4)
@@ -105,7 +102,6 @@ def func(x):
expected = g.apply(func)
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_rolling_apply(self, raw):
g = self.frame.groupby("A")
r = g.rolling(window=4)
@@ -115,7 +111,6 @@ def test_rolling_apply(self, raw):
expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_rolling_apply_mutability(self):
# GH 14013
df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})
@@ -197,7 +192,6 @@ def test_expanding_apply(self, raw):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]])
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_rolling(self, expected_value, raw_value):
# GH 31754
@@ -215,7 +209,6 @@ def foo(x):
)
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_rolling_center_center(self):
# GH 35552
series = Series(range(1, 6))
@@ -281,7 +274,6 @@ def test_groupby_rolling_center_center(self):
)
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_subselect_rolling(self):
# GH 35486
df = DataFrame(
@@ -307,7 +299,6 @@ def test_groupby_subselect_rolling(self):
)
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_rolling_custom_indexer(self):
# GH 35557
class SimpleIndexer(pd.api.indexers.BaseIndexer):
@@ -331,7 +322,6 @@ def get_window_bounds(
expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum()
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_rolling_subset_with_closed(self):
# GH 35549
df = pd.DataFrame(
@@ -356,7 +346,6 @@ def test_groupby_rolling_subset_with_closed(self):
)
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_subset_rolling_subset_with_closed(self):
# GH 35549
df = pd.DataFrame(
@@ -384,7 +373,6 @@ def test_groupby_subset_rolling_subset_with_closed(self):
)
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
@pytest.mark.parametrize("func", ["max", "min"])
def test_groupby_rolling_index_changed(self, func):
# GH: #36018 nlevels of MultiIndex changed
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index bea239a245a4f..8d72e2cb92ca9 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -7,7 +7,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import DataFrame, Series, compat, date_range
+from pandas import DataFrame, Series, date_range
import pandas._testing as tm
from pandas.core.window import Rolling
@@ -150,7 +150,6 @@ def test_closed_one_entry(func):
@pytest.mark.parametrize("func", ["min", "max"])
-@pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_closed_one_entry_groupby(func):
# GH24718
ser = pd.DataFrame(
@@ -683,7 +682,6 @@ def test_iter_rolling_datetime(expected, expected_index, window):
),
],
)
-@pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_rolling_positional_argument(grouping, _index, raw):
# GH 34605
diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py
index 90f919d5565b0..8aa4d7103e48a 100644
--- a/pandas/tests/window/test_timeseries_window.py
+++ b/pandas/tests/window/test_timeseries_window.py
@@ -7,7 +7,6 @@
MultiIndex,
Series,
Timestamp,
- compat,
date_range,
to_datetime,
)
@@ -657,7 +656,6 @@ def agg_by_day(x):
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_groupby_monotonic(self):
# GH 15130
@@ -687,7 +685,6 @@ def test_groupby_monotonic(self):
result = df.groupby("name").rolling("180D", on="date")["amount"].sum()
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(not compat.IS64, reason="GH-35294")
def test_non_monotonic(self):
# GH 13966 (similar to #15130, closed by #15175)
| Backport PR #35875: COMPAT: Ensure rolling indexers return intp during take operations | https://api.github.com/repos/pandas-dev/pandas/pulls/37471 | 2020-10-28T15:57:14Z | 2020-10-28T17:58:00Z | 2020-10-28T17:58:00Z | 2020-10-28T17:58:00Z |
TST/REF: finish collecting sample tests | diff --git a/pandas/tests/frame/methods/test_sample.py b/pandas/tests/frame/methods/test_sample.py
deleted file mode 100644
index 843c44bcf1471..0000000000000
--- a/pandas/tests/frame/methods/test_sample.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas.compat.numpy import np_version_under1p17
-
-from pandas import DataFrame
-import pandas._testing as tm
-import pandas.core.common as com
-
-
-class TestSample:
- @pytest.mark.parametrize(
- "func_str,arg",
- [
- ("np.array", [2, 3, 1, 0]),
- pytest.param(
- "np.random.MT19937",
- 3,
- marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"),
- ),
- pytest.param(
- "np.random.PCG64",
- 11,
- marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"),
- ),
- ],
- )
- def test_sample_random_state(self, func_str, arg):
- # GH#32503
- df = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
- result = df.sample(n=3, random_state=eval(func_str)(arg))
- expected = df.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
- tm.assert_frame_equal(result, expected)
-
- def test_sample_upsampling_without_replacement(self):
- # GH#27451
-
- df = DataFrame({"A": list("abc")})
- msg = (
- "Replace has to be set to `True` when "
- "upsampling the population `frac` > 1."
- )
- with pytest.raises(ValueError, match=msg):
- df.sample(frac=2, replace=False)
-
- def test_sample_is_copy(self):
- # GH#27357, GH#30784: ensure the result of sample is an actual copy and
- # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
- df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
- df2 = df.sample(3)
-
- with tm.assert_produces_warning(None):
- df2["d"] = 1
diff --git a/pandas/tests/generic/methods/test_sample.py b/pandas/tests/generic/methods/test_sample.py
new file mode 100644
index 0000000000000..7303dad9170ed
--- /dev/null
+++ b/pandas/tests/generic/methods/test_sample.py
@@ -0,0 +1,309 @@
+import numpy as np
+import pytest
+
+from pandas.compat.numpy import np_version_under1p17
+
+from pandas import DataFrame, Series
+import pandas._testing as tm
+import pandas.core.common as com
+
+
+class TestSample:
+ @pytest.fixture(params=[Series, DataFrame])
+ def obj(self, request):
+ klass = request.param
+ if klass is Series:
+ arr = np.random.randn(10)
+ else:
+ arr = np.random.randn(10, 10)
+ return klass(arr, dtype=None)
+
+ @pytest.mark.parametrize("test", list(range(10)))
+ def test_sample(self, test, obj):
+ # Fixes issue: 2419
+ # Check behavior of random_state argument
+ # Check for stability when receives seed or random state -- run 10
+ # times.
+
+ seed = np.random.randint(0, 100)
+ tm.assert_equal(
+ obj.sample(n=4, random_state=seed), obj.sample(n=4, random_state=seed)
+ )
+
+ tm.assert_equal(
+ obj.sample(frac=0.7, random_state=seed),
+ obj.sample(frac=0.7, random_state=seed),
+ )
+
+ tm.assert_equal(
+ obj.sample(n=4, random_state=np.random.RandomState(test)),
+ obj.sample(n=4, random_state=np.random.RandomState(test)),
+ )
+
+ tm.assert_equal(
+ obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
+ obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
+ )
+
+ tm.assert_equal(
+ obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
+ obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
+ )
+
+ os1, os2 = [], []
+ for _ in range(2):
+ np.random.seed(test)
+ os1.append(obj.sample(n=4))
+ os2.append(obj.sample(frac=0.7))
+ tm.assert_equal(*os1)
+ tm.assert_equal(*os2)
+
+ def test_sample_lengths(self, obj):
+ # Check lengths are right
+ assert len(obj.sample(n=4) == 4)
+ assert len(obj.sample(frac=0.34) == 3)
+ assert len(obj.sample(frac=0.36) == 4)
+
+ def test_sample_invalid_random_state(self, obj):
+ # Check for error when random_state argument invalid.
+ with pytest.raises(ValueError):
+ obj.sample(random_state="astring!")
+
+ def test_sample_wont_accept_n_and_frac(self, obj):
+ # Giving both frac and N throws error
+ with pytest.raises(ValueError):
+ obj.sample(n=3, frac=0.3)
+
+ def test_sample_requires_positive_n_frac(self, obj):
+ with pytest.raises(ValueError):
+ obj.sample(n=-3)
+ with pytest.raises(ValueError):
+ obj.sample(frac=-0.3)
+
+ def test_sample_requires_integer_n(self, obj):
+ # Make sure float values of `n` give error
+ with pytest.raises(ValueError):
+ obj.sample(n=3.2)
+
+ def test_sample_invalid_weight_lengths(self, obj):
+ # Weight length must be right
+ with pytest.raises(ValueError):
+ obj.sample(n=3, weights=[0, 1])
+
+ with pytest.raises(ValueError):
+ bad_weights = [0.5] * 11
+ obj.sample(n=3, weights=bad_weights)
+
+ with pytest.raises(ValueError):
+ bad_weight_series = Series([0, 0, 0.2])
+ obj.sample(n=4, weights=bad_weight_series)
+
+ def test_sample_negative_weights(self, obj):
+ # Check won't accept negative weights
+ with pytest.raises(ValueError):
+ bad_weights = [-0.1] * 10
+ obj.sample(n=3, weights=bad_weights)
+
+ def test_sample_inf_weights(self, obj):
+ # Check inf and -inf throw errors:
+
+ with pytest.raises(ValueError):
+ weights_with_inf = [0.1] * 10
+ weights_with_inf[0] = np.inf
+ obj.sample(n=3, weights=weights_with_inf)
+
+ with pytest.raises(ValueError):
+ weights_with_ninf = [0.1] * 10
+ weights_with_ninf[0] = -np.inf
+ obj.sample(n=3, weights=weights_with_ninf)
+
+ def test_sample_zero_weights(self, obj):
+ # All zeros raises errors
+
+ zero_weights = [0] * 10
+ with pytest.raises(ValueError):
+ obj.sample(n=3, weights=zero_weights)
+
+ def test_sample_missing_weights(self, obj):
+ # All missing weights
+
+ nan_weights = [np.nan] * 10
+ with pytest.raises(ValueError):
+ obj.sample(n=3, weights=nan_weights)
+
+ def test_sample_none_weights(self, obj):
+ # Check None are also replaced by zeros.
+ weights_with_None = [None] * 10
+ weights_with_None[5] = 0.5
+ tm.assert_equal(
+ obj.sample(n=1, axis=0, weights=weights_with_None), obj.iloc[5:6]
+ )
+
+ @pytest.mark.parametrize(
+ "func_str,arg",
+ [
+ ("np.array", [2, 3, 1, 0]),
+ pytest.param(
+ "np.random.MT19937",
+ 3,
+ marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"),
+ ),
+ pytest.param(
+ "np.random.PCG64",
+ 11,
+ marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"),
+ ),
+ ],
+ )
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ def test_sample_random_state(self, func_str, arg, klass):
+ # GH#32503
+ obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
+ if klass is Series:
+ obj = obj["col1"]
+ result = obj.sample(n=3, random_state=eval(func_str)(arg))
+ expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ def test_sample_upsampling_without_replacement(self, klass):
+ # GH#27451
+
+ obj = DataFrame({"A": list("abc")})
+ if klass is Series:
+ obj = obj["A"]
+
+ msg = (
+ "Replace has to be set to `True` when "
+ "upsampling the population `frac` > 1."
+ )
+ with pytest.raises(ValueError, match=msg):
+ obj.sample(frac=2, replace=False)
+
+
+class TestSampleDataFrame:
+ # Tests which are relevant only for DataFrame, so these are
+ # as fully parametrized as they can get.
+
+ def test_sample(self):
+ # GH#2419
+ # additional specific object based tests
+
+ # A few dataframe test with degenerate weights.
+ easy_weight_list = [0] * 10
+ easy_weight_list[5] = 1
+
+ df = DataFrame(
+ {
+ "col1": range(10, 20),
+ "col2": range(20, 30),
+ "colString": ["a"] * 10,
+ "easyweights": easy_weight_list,
+ }
+ )
+ sample1 = df.sample(n=1, weights="easyweights")
+ tm.assert_frame_equal(sample1, df.iloc[5:6])
+
+ # Ensure proper error if string given as weight for Series or
+ # DataFrame with axis = 1.
+ ser = Series(range(10))
+ with pytest.raises(ValueError):
+ ser.sample(n=3, weights="weight_column")
+
+ with pytest.raises(ValueError):
+ df.sample(n=1, weights="weight_column", axis=1)
+
+ # Check weighting key error
+ with pytest.raises(
+ KeyError, match="'String passed to weights not a valid column'"
+ ):
+ df.sample(n=3, weights="not_a_real_column_name")
+
+ # Check that re-normalizes weights that don't sum to one.
+ weights_less_than_1 = [0] * 10
+ weights_less_than_1[0] = 0.5
+ tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
+
+ ###
+ # Test axis argument
+ ###
+
+ # Test axis argument
+ df = DataFrame({"col1": range(10), "col2": ["a"] * 10})
+ second_column_weight = [0, 1]
+ tm.assert_frame_equal(
+ df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
+ )
+
+ # Different axis arg types
+ tm.assert_frame_equal(
+ df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
+ )
+
+ weight = [0] * 10
+ weight[5] = 0.5
+ tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
+ tm.assert_frame_equal(
+ df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
+ )
+
+ # Check out of range axis values
+ with pytest.raises(ValueError):
+ df.sample(n=1, axis=2)
+
+ with pytest.raises(ValueError):
+ df.sample(n=1, axis="not_a_name")
+
+ with pytest.raises(ValueError):
+ ser = Series(range(10))
+ ser.sample(n=1, axis=1)
+
+ # Test weight length compared to correct axis
+ with pytest.raises(ValueError):
+ df.sample(n=1, axis=1, weights=[0.5] * 10)
+
+ def test_sample_axis1(self):
+ # Check weights with axis = 1
+ easy_weight_list = [0] * 3
+ easy_weight_list[2] = 1
+
+ df = DataFrame(
+ {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
+ )
+ sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
+ tm.assert_frame_equal(sample1, df[["colString"]])
+
+ # Test default axes
+ tm.assert_frame_equal(
+ df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
+ )
+
+ def test_sample_aligns_weights_with_frame(self):
+
+ # Test that function aligns weights with frame
+ df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
+ ser = Series([1, 0, 0], index=[3, 5, 9])
+ tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser))
+
+ # Weights have index values to be dropped because not in
+ # sampled DataFrame
+ ser2 = Series([0.001, 0, 10000], index=[3, 5, 10])
+ tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser2))
+
+ # Weights have empty values to be filed with zeros
+ ser3 = Series([0.01, 0], index=[3, 5])
+ tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser3))
+
+ # No overlap in weight and sampled DataFrame indices
+ ser4 = Series([1, 0], index=[1, 2])
+ with pytest.raises(ValueError):
+ df.sample(1, weights=ser4)
+
+ def test_sample_is_copy(self):
+ # GH#27357, GH#30784: ensure the result of sample is an actual copy and
+ # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
+ df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
+ df2 = df.sample(3)
+
+ with tm.assert_produces_warning(None):
+ df2["d"] = 1
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 335f5125faa91..cccae1babe82f 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -273,134 +273,6 @@ def test_head_tail(self, index):
self._compare(o.head(-3), o.head(len(index) - 3))
self._compare(o.tail(-3), o.tail(len(index) - 3))
- def test_sample(self):
- # Fixes issue: 2419
-
- o = self._construct(shape=10)
-
- ###
- # Check behavior of random_state argument
- ###
-
- # Check for stability when receives seed or random state -- run 10
- # times.
- for test in range(10):
- seed = np.random.randint(0, 100)
- self._compare(
- o.sample(n=4, random_state=seed), o.sample(n=4, random_state=seed)
- )
-
- self._compare(
- o.sample(frac=0.7, random_state=seed),
- o.sample(frac=0.7, random_state=seed),
- )
-
- self._compare(
- o.sample(n=4, random_state=np.random.RandomState(test)),
- o.sample(n=4, random_state=np.random.RandomState(test)),
- )
-
- self._compare(
- o.sample(frac=0.7, random_state=np.random.RandomState(test)),
- o.sample(frac=0.7, random_state=np.random.RandomState(test)),
- )
-
- self._compare(
- o.sample(
- frac=2, replace=True, random_state=np.random.RandomState(test)
- ),
- o.sample(
- frac=2, replace=True, random_state=np.random.RandomState(test)
- ),
- )
-
- os1, os2 = [], []
- for _ in range(2):
- np.random.seed(test)
- os1.append(o.sample(n=4))
- os2.append(o.sample(frac=0.7))
- self._compare(*os1)
- self._compare(*os2)
-
- # Check for error when random_state argument invalid.
- with pytest.raises(ValueError):
- o.sample(random_state="astring!")
-
- ###
- # Check behavior of `frac` and `N`
- ###
-
- # Giving both frac and N throws error
- with pytest.raises(ValueError):
- o.sample(n=3, frac=0.3)
-
- # Check that raises right error for negative lengths
- with pytest.raises(ValueError):
- o.sample(n=-3)
- with pytest.raises(ValueError):
- o.sample(frac=-0.3)
-
- # Make sure float values of `n` give error
- with pytest.raises(ValueError):
- o.sample(n=3.2)
-
- # Check lengths are right
- assert len(o.sample(n=4) == 4)
- assert len(o.sample(frac=0.34) == 3)
- assert len(o.sample(frac=0.36) == 4)
-
- ###
- # Check weights
- ###
-
- # Weight length must be right
- with pytest.raises(ValueError):
- o.sample(n=3, weights=[0, 1])
-
- with pytest.raises(ValueError):
- bad_weights = [0.5] * 11
- o.sample(n=3, weights=bad_weights)
-
- with pytest.raises(ValueError):
- bad_weight_series = Series([0, 0, 0.2])
- o.sample(n=4, weights=bad_weight_series)
-
- # Check won't accept negative weights
- with pytest.raises(ValueError):
- bad_weights = [-0.1] * 10
- o.sample(n=3, weights=bad_weights)
-
- # Check inf and -inf throw errors:
- with pytest.raises(ValueError):
- weights_with_inf = [0.1] * 10
- weights_with_inf[0] = np.inf
- o.sample(n=3, weights=weights_with_inf)
-
- with pytest.raises(ValueError):
- weights_with_ninf = [0.1] * 10
- weights_with_ninf[0] = -np.inf
- o.sample(n=3, weights=weights_with_ninf)
-
- # All zeros raises errors
- zero_weights = [0] * 10
- with pytest.raises(ValueError):
- o.sample(n=3, weights=zero_weights)
-
- # All missing weights
- nan_weights = [np.nan] * 10
- with pytest.raises(ValueError):
- o.sample(n=3, weights=nan_weights)
-
- # Check np.nan are replaced by zeros.
- weights_with_nan = [np.nan] * 10
- weights_with_nan[5] = 0.5
- self._compare(o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
-
- # Check None are also replaced by zeros.
- weights_with_None = [None] * 10
- weights_with_None[5] = 0.5
- self._compare(o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
-
def test_size_compat(self):
# GH8846
# size property should be defined
@@ -511,117 +383,6 @@ def test_pct_change(self, periods, fill_method, limit, exp):
class TestNDFrame:
# tests that don't fit elsewhere
- def test_sample(self):
- # Fixes issue: 2419
- # additional specific object based tests
-
- # A few dataframe test with degenerate weights.
- easy_weight_list = [0] * 10
- easy_weight_list[5] = 1
-
- df = DataFrame(
- {
- "col1": range(10, 20),
- "col2": range(20, 30),
- "colString": ["a"] * 10,
- "easyweights": easy_weight_list,
- }
- )
- sample1 = df.sample(n=1, weights="easyweights")
- tm.assert_frame_equal(sample1, df.iloc[5:6])
-
- # Ensure proper error if string given as weight for Series or
- # DataFrame with axis = 1.
- s = Series(range(10))
- with pytest.raises(ValueError):
- s.sample(n=3, weights="weight_column")
-
- with pytest.raises(ValueError):
- df.sample(n=1, weights="weight_column", axis=1)
-
- # Check weighting key error
- with pytest.raises(
- KeyError, match="'String passed to weights not a valid column'"
- ):
- df.sample(n=3, weights="not_a_real_column_name")
-
- # Check that re-normalizes weights that don't sum to one.
- weights_less_than_1 = [0] * 10
- weights_less_than_1[0] = 0.5
- tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
-
- ###
- # Test axis argument
- ###
-
- # Test axis argument
- df = DataFrame({"col1": range(10), "col2": ["a"] * 10})
- second_column_weight = [0, 1]
- tm.assert_frame_equal(
- df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
- )
-
- # Different axis arg types
- tm.assert_frame_equal(
- df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
- )
-
- weight = [0] * 10
- weight[5] = 0.5
- tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
- tm.assert_frame_equal(
- df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
- )
-
- # Check out of range axis values
- with pytest.raises(ValueError):
- df.sample(n=1, axis=2)
-
- with pytest.raises(ValueError):
- df.sample(n=1, axis="not_a_name")
-
- with pytest.raises(ValueError):
- s = Series(range(10))
- s.sample(n=1, axis=1)
-
- # Test weight length compared to correct axis
- with pytest.raises(ValueError):
- df.sample(n=1, axis=1, weights=[0.5] * 10)
-
- # Check weights with axis = 1
- easy_weight_list = [0] * 3
- easy_weight_list[2] = 1
-
- df = DataFrame(
- {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
- )
- sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
- tm.assert_frame_equal(sample1, df[["colString"]])
-
- # Test default axes
- tm.assert_frame_equal(
- df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
- )
-
- # Test that function aligns weights with frame
- df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
- s = Series([1, 0, 0], index=[3, 5, 9])
- tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
-
- # Weights have index values to be dropped because not in
- # sampled DataFrame
- s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
- tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
-
- # Weights have empty values to be filed with zeros
- s3 = Series([0.01, 0], index=[3, 5])
- tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
-
- # No overlap in weight and sampled DataFrame indices
- s4 = Series([1, 0], index=[1, 2])
- with pytest.raises(ValueError):
- df.sample(1, weights=s4)
-
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
| Parametrizes a few tests from the frame.methods.test_sample file, splits up giant tests from the generic file. | https://api.github.com/repos/pandas-dev/pandas/pulls/37470 | 2020-10-28T15:46:37Z | 2020-10-29T01:02:30Z | 2020-10-29T01:02:30Z | 2020-10-29T01:58:35Z |
BUG: Fix for #37454: allow reversed axis when plotting with TimedeltaIndex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 62da3c0c5cddc..694a9afa35130 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -642,9 +642,10 @@ Plotting
- Bug in :meth:`DataFrame.plot` was rotating xticklabels when ``subplots=True``, even if the x-axis wasn't an irregular time series (:issue:`29460`)
- Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`)
- Twinned axes were losing their tick labels which should only happen to all but the last row or column of 'externally' shared axes (:issue:`33819`)
+- Bug in :meth:`Series.plot` and :meth:`DataFrame.plot` was throwing :exc:`ValueError` with a :class:`Series` or :class:`DataFrame`
+ indexed by a :class:`TimedeltaIndex` with a fixed frequency when x-axis lower limit was greater than upper limit (:issue:`37454`)
- Bug in :meth:`DataFrameGroupBy.boxplot` when ``subplots=False``, a KeyError would raise (:issue:`16748`)
-
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 27c7b931b7136..38789fffed8a0 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -1072,7 +1072,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
def __call__(self, x, pos=0) -> str:
(vmin, vmax) = tuple(self.axis.get_view_interval())
- n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
+ n_decimals = int(np.ceil(np.log10(100 * 1e9 / abs(vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 7a367ccab6d52..583ed040c20d5 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -372,3 +372,14 @@ def test_format_timedelta_ticks(self, x, decimal, format_expected):
tdc = converter.TimeSeries_TimedeltaFormatter
result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal)
assert result == format_expected
+
+ @pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)])
+ def test_call_w_different_view_intervals(self, view_interval, monkeypatch):
+ # previously broke on reversed xlmits; see GH37454
+ class mock_axis:
+ def get_view_interval(self):
+ return view_interval
+
+ tdc = converter.TimeSeries_TimedeltaFormatter()
+ monkeypatch.setattr(tdc, "axis", mock_axis())
+ tdc(0.0, 0)
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 777bc914069a6..f3289d0573de2 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -983,3 +983,16 @@ def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label):
ax = ser.plot(kind=kind, ylabel=new_label, xlabel=new_label)
assert ax.get_ylabel() == new_label
assert ax.get_xlabel() == new_label
+
+ @pytest.mark.parametrize(
+ "index",
+ [
+ pd.timedelta_range(start=0, periods=2, freq="D"),
+ [pd.Timedelta(days=1), pd.Timedelta(days=2)],
+ ],
+ )
+ def test_timedelta_index(self, index):
+ # GH37454
+ xlims = (3, 1)
+ ax = Series([1, 2], index=index).plot(xlim=(xlims))
+ assert ax.get_xlim() == (3, 1)
| - [x] closes #37454
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37469 | 2020-10-28T13:11:42Z | 2020-11-18T00:55:36Z | 2020-11-18T00:55:36Z | 2020-11-18T00:55:41Z |
BUG: Correct crosstab for categorical inputs | diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 842a42f80e1b7..8fae01cb30d3d 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -329,8 +329,7 @@ def _all_key(key):
piece = piece.copy()
try:
piece[all_key] = margin[key]
- except TypeError:
-
+ except ValueError:
# we cannot reshape, so coerce the axis
piece.set_axis(
piece._get_axis(cat_axis)._to_safe_for_reshape(),
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index 1aadcfdc30f1b..5f6037276b31c 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_categorical_dtype
+
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, Series, crosstab
import pandas._testing as tm
@@ -743,3 +745,33 @@ def test_margin_normalize_multiple_columns(self):
)
expected.index.name = "C"
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("a_dtype", ["category", "int64"])
+@pytest.mark.parametrize("b_dtype", ["category", "int64"])
+def test_categoricals(a_dtype, b_dtype):
+ # https://github.com/pandas-dev/pandas/issues/37465
+ g = np.random.RandomState(25982704)
+ a = Series(g.randint(0, 3, size=100)).astype(a_dtype)
+ b = Series(g.randint(0, 2, size=100)).astype(b_dtype)
+ result = crosstab(a, b, margins=True, dropna=False)
+ columns = Index([0, 1, "All"], dtype="object", name="col_0")
+ index = Index([0, 1, 2, "All"], dtype="object", name="row_0")
+ values = [[18, 16, 34], [18, 16, 34], [16, 16, 32], [52, 48, 100]]
+ expected = DataFrame(values, index, columns)
+ tm.assert_frame_equal(result, expected)
+
+ # Verify when categorical does not have all values present
+ a.loc[a == 1] = 2
+ a_is_cat = is_categorical_dtype(a.dtype)
+ assert not a_is_cat or a.value_counts().loc[1] == 0
+ result = crosstab(a, b, margins=True, dropna=False)
+ values = [[18, 16, 34], [0, 0, np.nan], [34, 32, 66], [52, 48, 100]]
+ expected = DataFrame(values, index, columns)
+ if not a_is_cat:
+ expected = expected.loc[[0, 2, "All"]]
+ expected["All"] = expected["All"].astype("int64")
+ print(result)
+ print(expected)
+ print(expected.loc[[0, 2, "All"]])
+ tm.assert_frame_equal(result, expected)
| Change catch types to reflect error changes
closes #37465
- [X] closes #37465
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry (N/A: Never released, caught in statsmodels pre-release testing)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37468 | 2020-10-28T11:53:26Z | 2020-10-31T15:16:56Z | 2020-10-31T15:16:56Z | 2021-09-10T11:18:13Z |
Backport PR #35898 on branch 1.1.x: CI: docker 32-bit linux build | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 113ad3e338952..b1091ea7f60e4 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -26,3 +26,28 @@ jobs:
parameters:
name: Windows
vmImage: vs2017-win2016
+
+- job: py37_32bit
+ pool:
+ vmImage: ubuntu-18.04
+
+ steps:
+ - script: |
+ docker pull quay.io/pypa/manylinux2014_i686
+ docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
+ /bin/bash -xc "cd pandas && \
+ /opt/python/cp37-cp37m/bin/python -m venv ~/virtualenvs/pandas-dev && \
+ . ~/virtualenvs/pandas-dev/bin/activate && \
+ python -m pip install --no-deps -U pip wheel setuptools && \
+ pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis pytest-azurepipelines && \
+ python setup.py build_ext -q -i -j2 && \
+ python -m pip install --no-build-isolation -e . && \
+ pytest -m 'not slow and not network and not clipboard' pandas --junitxml=test-data.xml"
+ displayName: 'Run 32-bit manylinux2014 Docker Build / Tests'
+
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python 3.7-32 bit full Linux'
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index ebce5b0ef0a66..bdb283ae445b1 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1255,8 +1255,8 @@ def test_groupby_nat_exclude():
assert grouped.ngroups == 2
expected = {
- Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.int64),
- Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.int64),
+ Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
+ Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py
index 5ef2ce9c47236..6d2d9be97d7fa 100644
--- a/pandas/tests/io/formats/test_info.py
+++ b/pandas/tests/io/formats/test_info.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import PYPY
+from pandas.compat import IS64, PYPY
from pandas import (
CategoricalIndex,
@@ -405,6 +405,7 @@ def test_info_categorical():
df.info(buf=buf)
+@pytest.mark.xfail(not IS64, reason="GH 36579: fail on 32-bit system")
def test_info_int_columns():
# GH#37245
df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"])
| Backport PR #35898 | https://api.github.com/repos/pandas-dev/pandas/pulls/37466 | 2020-10-28T10:46:05Z | 2020-10-28T12:20:33Z | 2020-10-28T12:20:33Z | 2020-10-28T12:22:41Z |
Backport PR #37432 on branch 1.1.x | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 467e3ef00c1a7..6cb728800dc68 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -27,6 +27,7 @@ Fixed regressions
- Fixed regression where slicing :class:`DatetimeIndex` raised :exc:`AssertionError` on irregular time series with ``pd.NaT`` or on unsorted indices (:issue:`36953` and :issue:`35509`)
- Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`)
- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`)
+- Fixed regression in setitem with :meth:`DataFrame.iloc` which raised error when trying to set a value while filtering with a boolean list (:issue:`36741`)
- Fixed regression in :attr:`MultiIndex.is_monotonic_increasing` returning wrong results with ``NaN`` in at least one of the levels (:issue:`37220`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5a24addf46d93..d21ff6ee17537 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1669,8 +1669,6 @@ def _setitem_with_indexer(self, indexer, value):
"length than the value"
)
- pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
-
def isetter(loc, v):
# positional setting on column loc
ser = self.obj._ixs(loc, axis=1)
@@ -1680,15 +1678,15 @@ def isetter(loc, v):
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
- if isinstance(pi, tuple) and all(
+ if isinstance(plane_indexer, tuple) and all(
com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj))
- for idx in pi
+ for idx in plane_indexer
):
ser = v
else:
# set the item, possibly having a dtype change
ser = ser.copy()
- ser._mgr = ser._mgr.setitem(indexer=pi, value=v)
+ ser._mgr = ser._mgr.setitem(indexer=plane_indexer, value=v)
ser._maybe_update_cacher(clear=True)
# reset the sliced object if unique
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index c5945edfd3127..3d19a2567f3a5 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -184,3 +184,12 @@ def test_setitem_extension_types(self, obj, dtype):
df["obj"] = obj
tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.parametrize("klass", [list, np.array])
+ def test_iloc_setitem_bool_indexer(self, klass):
+ # GH: 36741
+ df = DataFrame({"flag": ["x", "y", "z"], "value": [1, 3, 4]})
+ indexer = klass([True, False, False])
+ df.iloc[indexer, 1] = df.iloc[indexer, 1] * 2
+ expected = DataFrame({"flag": ["x", "y", "z"], "value": [2, 3, 4]})
+ tm.assert_frame_equal(df, expected)
| (cherry picked from commit 52925335fe6d5f1ea6c79bb03da70654d9be9668)
Tried to merge it. Hopefully nothing was broken in the process. Run the indexing tests locally, but have to wait for ci
cc @simonjayhawkins | https://api.github.com/repos/pandas-dev/pandas/pulls/37463 | 2020-10-28T08:27:23Z | 2020-10-28T09:27:50Z | 2020-10-28T09:27:50Z | 2020-10-28T09:32:25Z |
BUG: Metadata propagation for groupby iterator | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index cf728d94b2a55..a122154904996 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -23,7 +23,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
--
+- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index bca71b5c9646b..7d772b2d07a4e 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -140,9 +140,16 @@ def get_iterator(
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
- yield key, group
+ yield key, group.__finalize__(data, method="groupby")
def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> "DataSplitter":
+ """
+ Returns
+ -------
+ Generator yielding subsetted objects
+
+ __finalize__ has not been called for the the subsetted objects returned.
+ """
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
@@ -921,7 +928,8 @@ class SeriesSplitter(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
# fastpath equivalent to `sdata.iloc[slice_obj]`
mgr = sdata._mgr.get_slice(slice_obj)
- return type(sdata)(mgr, name=sdata.name, fastpath=True)
+ # __finalize__ not called here, must be applied by caller if applicable
+ return sdata._constructor(mgr, name=sdata.name, fastpath=True)
class FrameSplitter(DataSplitter):
@@ -937,7 +945,8 @@ def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
# else:
# return sdata.iloc[:, slice_obj]
mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
- return type(sdata)(mgr)
+ # __finalize__ not called here, must be applied by caller if applicable
+ return sdata._constructor(mgr)
def get_splitter(
diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py
index cc7a79e976513..d268d87708552 100644
--- a/pandas/tests/groupby/test_groupby_subclass.py
+++ b/pandas/tests/groupby/test_groupby_subclass.py
@@ -51,6 +51,15 @@ def test_groupby_preserves_subclass(obj, groupby_func):
tm.assert_series_equal(result1, result2)
+def test_groupby_preserves_metadata():
+ # GH-37343
+ custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]})
+ assert "testattr" in custom_df._metadata
+ custom_df.testattr = "hello"
+ for _, group_df in custom_df.groupby("c"):
+ assert group_df.testattr == "hello"
+
+
@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame])
def test_groupby_resample_preserves_subclass(obj):
# GH28330 -- preserve subclass through groupby.resample()
| Addresses part of #28283
This PR ensures that `__finalize__` is called for objects returned by iterator over `.groupby`-results.
Based on the discussion and benchmarks in PR #35688 (see https://github.com/pandas-dev/pandas/pull/35688#issuecomment-675367517), `__finalize__` is _not_ called for intermediate objects used by `.apply` (and maybe other code paths).
The implemented solution is the safe choice with regard to performance: An alternative solution would be to call `__finalize__` immediately after initialization (in `DataSplitter._chop()`).
The downside to doing so would be the performance hit due to the overhead of finalizing intermediate objects in `.apply` (and maybe other code paths). In the benchmarks referenced above, this amounted to around 4%.
The upside of finalizing immediately after initialization would be reduced complexity and that it would allow `.apply` (and any other code paths using `DataSplitter._chop` to access metadata)..
- [x] closes #37343
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37461 | 2020-10-28T07:30:05Z | 2020-11-04T13:22:55Z | 2020-11-04T13:22:55Z | 2020-11-04T14:19:10Z |
Backport PR #37439 on branch 1.1.x (REGR: fix rank algo for read-only data) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index 33a52353bed7e..467e3ef00c1a7 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -19,6 +19,7 @@ Fixed regressions
- Fixed regression where :meth:`DataFrame.agg` would fail with :exc:`TypeError` when passed positional arguments to be passed on to the aggregation function (:issue:`36948`).
- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`)
- Fixed regression in :meth:`Series.astype` converting ``None`` to ``"nan"`` when casting to string (:issue:`36904`)
+- Fixed regression in :meth:`Series.rank` method failing for read-only data (:issue:`37290`)
- Fixed regression in :class:`RollingGroupby` causing a segmentation fault with Index of dtype object (:issue:`36727`)
- Fixed regression in :meth:`DataFrame.resample(...).apply(...)` raised ``AttributeError`` when input was a :class:`DataFrame` and only a :class:`Series` was evaluated (:issue:`36951`)
- Fixed regression in ``DataFrame.groupby(..).std()`` with nullable integer dtype (:issue:`37415`)
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 0a70afda893cf..564dfc3a2ca5a 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -325,7 +325,7 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
@cython.boundscheck(False)
@cython.wraparound(False)
-def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1) -> ndarray:
+def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1) -> ndarray:
cdef:
Py_ssize_t i, j, xi, yi, N, K
ndarray[float64_t, ndim=2] result
@@ -799,7 +799,7 @@ ctypedef fused rank_t:
@cython.wraparound(False)
@cython.boundscheck(False)
def rank_1d(
- rank_t[:] in_arr,
+ ndarray[rank_t, ndim=1] in_arr,
ties_method="average",
bint ascending=True,
na_option="keep",
@@ -1018,7 +1018,7 @@ def rank_1d(
def rank_2d(
- rank_t[:, :] in_arr,
+ ndarray[rank_t, ndim=2] in_arr,
int axis=0,
ties_method="average",
bint ascending=True,
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index a8a55418a619a..a78f8ad3cd4dd 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1751,11 +1751,13 @@ def _check(arr):
_check(np.array([np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan]))
_check(np.array([4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan]))
- def test_basic(self):
+ def test_basic(self, writable):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes["AllInteger"]:
- s = Series([1, 100], dtype=dtype)
+ data = np.array([1, 100], dtype=dtype)
+ data.setflags(write=writable)
+ s = Series(data)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
| Backport PR #37439: REGR: fix rank algo for read-only data | https://api.github.com/repos/pandas-dev/pandas/pulls/37459 | 2020-10-28T02:23:41Z | 2020-10-28T07:24:12Z | 2020-10-28T07:24:12Z | 2020-10-28T07:24:13Z |
CLN: de-duplicate validator | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index a6c74294c4c75..cd312b09ab6c1 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -602,10 +602,9 @@ def _validate_listlike(self, value, allow_object: bool = False):
pass
elif not type(self)._is_recognized_dtype(value.dtype):
- raise TypeError(
- f"value should be a '{self._scalar_type.__name__}', 'NaT', "
- f"or array of those. Got '{type(value).__name__}' instead."
- )
+ msg = self._validation_error_message(value, True)
+ raise TypeError(msg)
+
return value
def _validate_searchsorted_value(self, value):
@@ -624,19 +623,13 @@ def _validate_setitem_value(self, value):
return self._unbox(value, setitem=True)
+ _validate_where_value = _validate_setitem_value
+
def _validate_insert_value(self, value):
value = self._validate_scalar(value)
return self._unbox(value, setitem=True)
- def _validate_where_value(self, other):
- if not is_list_like(other):
- other = self._validate_scalar(other, True)
- else:
- other = self._validate_listlike(other)
-
- return self._unbox(other, setitem=True)
-
def _unbox(
self, other, setitem: bool = False
) -> Union[np.int64, np.datetime64, np.timedelta64, np.ndarray]:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37458 | 2020-10-28T02:21:57Z | 2020-10-30T16:07:45Z | 2020-10-30T16:07:45Z | 2020-10-30T17:02:31Z |
BUG: BusinessDay.apply_index with offset | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 7c850ffedfcab..281edf1834fb2 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -390,6 +390,7 @@ Datetimelike
- Bug in :meth:`DatetimeIndex.equals` and :meth:`TimedeltaIndex.equals` incorrectly considering ``int64`` indexes as equal (:issue:`36744`)
- Bug in :meth:`TimedeltaIndex.sum` and :meth:`Series.sum` with ``timedelta64`` dtype on an empty index or series returning ``NaT`` instead of ``Timedelta(0)`` (:issue:`31751`)
- Bug in :meth:`DatetimeArray.shift` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37299`)
+- Bug in adding a :class:`BusinessDay` with nonzero ``offset`` to a non-scalar other (:issue:`37457`)
Timedelta
^^^^^^^^^
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 98b2ddbd21ee1..dbd094905cf24 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -1388,7 +1388,11 @@ cdef class BusinessDay(BusinessMixin):
@apply_array_wraps
def _apply_array(self, dtarr):
i8other = dtarr.view("i8")
- return shift_bdays(i8other, self.n)
+ res = _shift_bdays(i8other, self.n)
+ if self.offset:
+ res = res.view("M8[ns]") + Timedelta(self.offset)
+ res = res.view("i8")
+ return res
def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
@@ -3778,7 +3782,7 @@ cdef inline void _shift_quarters(const int64_t[:] dtindex,
out[i] = dtstruct_to_dt64(&dts)
-cdef ndarray[int64_t] shift_bdays(const int64_t[:] i8other, int periods):
+cdef ndarray[int64_t] _shift_bdays(const int64_t[:] i8other, int periods):
"""
Implementation of BusinessDay.apply_offset.
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 922aff1792227..fba123e47feb2 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -750,6 +750,13 @@ def test_with_offset(self):
assert (self.d + offset) == datetime(2008, 1, 2, 2)
+ def test_with_offset_index(self):
+ dti = DatetimeIndex([self.d])
+ result = dti + (self.offset + timedelta(hours=2))
+
+ expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+ tm.assert_index_equal(result, expected)
+
def test_eq(self):
assert self.offset2 == self.offset2
@@ -2642,6 +2649,13 @@ def test_with_offset(self):
assert (self.d + offset) == datetime(2008, 1, 2, 2)
+ def test_with_offset_index(self):
+ dti = DatetimeIndex([self.d])
+ result = dti + (self.offset + timedelta(hours=2))
+
+ expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+ tm.assert_index_equal(result, expected)
+
def test_eq(self):
assert self.offset2 == self.offset2
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37457 | 2020-10-28T01:05:48Z | 2020-11-01T01:50:41Z | 2020-11-01T01:50:41Z | 2020-11-01T02:16:39Z |
TST/REF: misplaced .xs tests | diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
index 20e8e252615d3..11e076f313540 100644
--- a/pandas/tests/frame/indexing/test_xs.py
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -3,12 +3,30 @@
import numpy as np
import pytest
-from pandas import DataFrame, Index, Series
+from pandas import DataFrame, Index, IndexSlice, MultiIndex, Series, concat
import pandas._testing as tm
+import pandas.core.common as com
from pandas.tseries.offsets import BDay
+@pytest.fixture
+def four_level_index_dataframe():
+ arr = np.array(
+ [
+ [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
+ [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
+ [-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
+ ]
+ )
+ index = MultiIndex(
+ levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
+ codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
+ names=["one", "two", "three", "four"],
+ )
+ return DataFrame(arr, index=index, columns=list("ABCDE"))
+
+
class TestXS:
def test_xs(self, float_frame, datetime_frame):
idx = float_frame.index[5]
@@ -92,3 +110,190 @@ def test_xs_view(self):
dm.xs(2)[:] = 10
assert (dm.xs(2) == 10).all()
+
+
+class TestXSWithMultiIndex:
+ def test_xs_integer_key(self):
+ # see GH#2107
+ dates = range(20111201, 20111205)
+ ids = list("abcde")
+ index = MultiIndex.from_product([dates, ids], names=["date", "secid"])
+ df = DataFrame(np.random.randn(len(index), 3), index, ["X", "Y", "Z"])
+
+ result = df.xs(20111201, level="date")
+ expected = df.loc[20111201, :]
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_level(self, multiindex_dataframe_random_data):
+ df = multiindex_dataframe_random_data
+ result = df.xs("two", level="second")
+ expected = df[df.index.get_level_values(1) == "two"]
+ expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_level_eq_2(self):
+ arr = np.random.randn(3, 5)
+ index = MultiIndex(
+ levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
+ codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
+ )
+ df = DataFrame(arr, index=index)
+ expected = DataFrame(arr[1:2], index=[["a"], ["b"]])
+ result = df.xs("c", level=2)
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_setting_with_copy_error(self, multiindex_dataframe_random_data):
+ # this is a copy in 0.14
+ df = multiindex_dataframe_random_data
+ result = df.xs("two", level="second")
+
+ # setting this will give a SettingWithCopyError
+ # as we are trying to write a view
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(com.SettingWithCopyError, match=msg):
+ result[:] = 10
+
+ def test_xs_setting_with_copy_error_multiple(self, four_level_index_dataframe):
+ # this is a copy in 0.14
+ df = four_level_index_dataframe
+ result = df.xs(("a", 4), level=["one", "four"])
+
+ # setting this will give a SettingWithCopyError
+ # as we are trying to write a view
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(com.SettingWithCopyError, match=msg):
+ result[:] = 10
+
+ @pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
+ def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data):
+ # see GH#13719
+ frame = multiindex_dataframe_random_data
+ df = concat([frame] * 2)
+ assert df.index.is_unique is False
+ expected = concat([frame.xs("one", level="second")] * 2)
+
+ result = df.xs(key, level=level)
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_missing_values_in_index(self):
+ # see GH#6574
+ # missing values in returned index should be preserved
+ acc = [
+ ("a", "abcde", 1),
+ ("b", "bbcde", 2),
+ ("y", "yzcde", 25),
+ ("z", "xbcde", 24),
+ ("z", None, 26),
+ ("z", "zbcde", 25),
+ ("z", "ybcde", 26),
+ ]
+ df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
+ expected = DataFrame(
+ {"cnt": [24, 26, 25, 26]},
+ index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
+ )
+
+ result = df.xs("z", level="a1")
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "key, level, exp_arr, exp_index",
+ [
+ ("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
+ ("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
+ ],
+ )
+ def test_xs_named_levels_axis_eq_1(self, key, level, exp_arr, exp_index):
+ # see GH#2903
+ arr = np.random.randn(4, 4)
+ index = MultiIndex(
+ levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
+ codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
+ names=["lvl0", "lvl1"],
+ )
+ df = DataFrame(arr, columns=index)
+ result = df.xs(key, level=level, axis=1)
+ expected = DataFrame(exp_arr(arr), columns=exp_index)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "indexer",
+ [
+ lambda df: df.xs(("a", 4), level=["one", "four"]),
+ lambda df: df.xs("a").xs(4, level="four"),
+ ],
+ )
+ def test_xs_level_multiple(self, indexer, four_level_index_dataframe):
+ df = four_level_index_dataframe
+ expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]
+ expected_index = MultiIndex(
+ levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
+ )
+ expected = DataFrame(
+ expected_values, index=expected_index, columns=list("ABCDE")
+ )
+ result = indexer(df)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
+ )
+ def test_xs_level0(self, indexer, four_level_index_dataframe):
+ df = four_level_index_dataframe
+ expected_values = [
+ [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
+ [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
+ ]
+ expected_index = MultiIndex(
+ levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
+ codes=[[0, 1], [0, 1], [1, 0]],
+ names=["two", "three", "four"],
+ )
+ expected = DataFrame(
+ expected_values, index=expected_index, columns=list("ABCDE")
+ )
+
+ result = indexer(df)
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_values(self, multiindex_dataframe_random_data):
+ df = multiindex_dataframe_random_data
+ result = df.xs(("bar", "two")).values
+ expected = df.values[4]
+ tm.assert_almost_equal(result, expected)
+
+ def test_xs_loc_equality(self, multiindex_dataframe_random_data):
+ df = multiindex_dataframe_random_data
+ result = df.xs(("bar", "two"))
+ expected = df.loc[("bar", "two")]
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("klass", [DataFrame, Series])
+ def test_xs_IndexSlice_argument_not_implemented(self, klass):
+ # GH#35301
+
+ index = MultiIndex(
+ levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
+ )
+
+ obj = DataFrame(np.random.randn(6, 4), index=index)
+ if klass is Series:
+ obj = obj[0]
+
+ msg = (
+ "Expected label or tuple of labels, got "
+ r"\(\('foo', 'qux', 0\), slice\(None, None, None\)\)"
+ )
+ with pytest.raises(TypeError, match=msg):
+ obj.xs(IndexSlice[("foo", "qux", 0), :])
+
+ @pytest.mark.parametrize("klass", [DataFrame, Series])
+ def test_xs_levels_raises(self, klass):
+ obj = DataFrame({"A": [1, 2, 3]})
+ if klass is Series:
+ obj = obj["A"]
+
+ msg = "Index must be a MultiIndex"
+ with pytest.raises(TypeError, match=msg):
+ obj.xs(0, level="as")
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 81be110fd11e5..57747f8274d85 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -662,6 +662,13 @@ def test_get_loc_past_lexsort_depth(self):
assert result == slice(0, 1, None)
+ def test_multiindex_get_loc_list_raises(self):
+ # GH#35878
+ idx = MultiIndex.from_tuples([("a", 1), ("b", 2)])
+ msg = "unhashable type"
+ with pytest.raises(TypeError, match=msg):
+ idx.get_loc([])
+
class TestWhere:
def test_where(self):
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 0e466b49f6597..2cb5b55f14596 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -590,3 +590,11 @@ def test_missing_key_raises_keyerror2(self):
with pytest.raises(KeyError, match=r"\(0, 3\)"):
ser.loc[0, 3]
+
+
+def test_getitem_loc_commutability(multiindex_year_month_day_dataframe_random_data):
+ df = multiindex_year_month_day_dataframe_random_data
+ ser = df["A"]
+ result = ser[2000, 5]
+ expected = df.loc[2000, 5]["A"]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index ed1348efb5cba..a3b8d66c92024 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -1,5 +1,4 @@
import numpy as np
-import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
@@ -84,10 +83,3 @@ def test_nested_tuples_duplicates(self):
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
tm.assert_frame_equal(df3, expected)
-
- def test_multiindex_get_loc_list_raises(self):
- # https://github.com/pandas-dev/pandas/issues/35878
- idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
- msg = "unhashable type"
- with pytest.raises(TypeError, match=msg):
- idx.get_loc([])
diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py
deleted file mode 100644
index 91be1d913001b..0000000000000
--- a/pandas/tests/indexing/multiindex/test_xs.py
+++ /dev/null
@@ -1,280 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import DataFrame, Index, IndexSlice, MultiIndex, Series, concat, date_range
-import pandas._testing as tm
-import pandas.core.common as com
-
-
-@pytest.fixture
-def four_level_index_dataframe():
- arr = np.array(
- [
- [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
- [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
- [-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
- ]
- )
- index = MultiIndex(
- levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
- codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
- names=["one", "two", "three", "four"],
- )
- return DataFrame(arr, index=index, columns=list("ABCDE"))
-
-
-@pytest.mark.parametrize(
- "key, level, exp_arr, exp_index",
- [
- ("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
- ("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
- ],
-)
-def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
- # see gh-2903
- arr = np.random.randn(4, 4)
- index = MultiIndex(
- levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
- codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
- names=["lvl0", "lvl1"],
- )
- df = DataFrame(arr, columns=index)
- result = df.xs(key, level=level, axis=1)
- expected = DataFrame(exp_arr(arr), columns=exp_index)
- tm.assert_frame_equal(result, expected)
-
-
-def test_xs_values(multiindex_dataframe_random_data):
- df = multiindex_dataframe_random_data
- result = df.xs(("bar", "two")).values
- expected = df.values[4]
- tm.assert_almost_equal(result, expected)
-
-
-def test_xs_loc_equality(multiindex_dataframe_random_data):
- df = multiindex_dataframe_random_data
- result = df.xs(("bar", "two"))
- expected = df.loc[("bar", "two")]
- tm.assert_series_equal(result, expected)
-
-
-def test_xs_missing_values_in_index():
- # see gh-6574
- # missing values in returned index should be preserved
- acc = [
- ("a", "abcde", 1),
- ("b", "bbcde", 2),
- ("y", "yzcde", 25),
- ("z", "xbcde", 24),
- ("z", None, 26),
- ("z", "zbcde", 25),
- ("z", "ybcde", 26),
- ]
- df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
- expected = DataFrame(
- {"cnt": [24, 26, 25, 26]},
- index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
- )
-
- result = df.xs("z", level="a1")
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
-def test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):
- # see gh-13719
- frame = multiindex_dataframe_random_data
- df = concat([frame] * 2)
- assert df.index.is_unique is False
- expected = concat([frame.xs("one", level="second")] * 2)
-
- result = df.xs(key, level=level)
- tm.assert_frame_equal(result, expected)
-
-
-def test_xs_level(multiindex_dataframe_random_data):
- df = multiindex_dataframe_random_data
- result = df.xs("two", level="second")
- expected = df[df.index.get_level_values(1) == "two"]
- expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
- tm.assert_frame_equal(result, expected)
-
-
-def test_xs_level_eq_2():
- arr = np.random.randn(3, 5)
- index = MultiIndex(
- levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
- codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
- )
- df = DataFrame(arr, index=index)
- expected = DataFrame(arr[1:2], index=[["a"], ["b"]])
- result = df.xs("c", level=2)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "indexer",
- [
- lambda df: df.xs(("a", 4), level=["one", "four"]),
- lambda df: df.xs("a").xs(4, level="four"),
- ],
-)
-def test_xs_level_multiple(indexer, four_level_index_dataframe):
- df = four_level_index_dataframe
- expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]
- expected_index = MultiIndex(
- levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
- )
- expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
- result = indexer(df)
- tm.assert_frame_equal(result, expected)
-
-
-def test_xs_setting_with_copy_error(multiindex_dataframe_random_data):
- # this is a copy in 0.14
- df = multiindex_dataframe_random_data
- result = df.xs("two", level="second")
-
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(com.SettingWithCopyError, match=msg):
- result[:] = 10
-
-
-def test_xs_setting_with_copy_error_multiple(four_level_index_dataframe):
- # this is a copy in 0.14
- df = four_level_index_dataframe
- result = df.xs(("a", 4), level=["one", "four"])
-
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(com.SettingWithCopyError, match=msg):
- result[:] = 10
-
-
-def test_xs_integer_key():
- # see gh-2107
- dates = range(20111201, 20111205)
- ids = list("abcde")
- index = MultiIndex.from_product([dates, ids], names=["date", "secid"])
- df = DataFrame(np.random.randn(len(index), 3), index, ["X", "Y", "Z"])
-
- result = df.xs(20111201, level="date")
- expected = df.loc[20111201, :]
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
-)
-def test_xs_level0(indexer, four_level_index_dataframe):
- df = four_level_index_dataframe
- expected_values = [
- [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
- [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
- ]
- expected_index = MultiIndex(
- levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
- codes=[[0, 1], [0, 1], [1, 0]],
- names=["two", "three", "four"],
- )
- expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
-
- result = indexer(df)
- tm.assert_frame_equal(result, expected)
-
-
-def test_xs_level_series(multiindex_dataframe_random_data):
- # this test is not explicitly testing .xs functionality
- # TODO: move to another module or refactor
- df = multiindex_dataframe_random_data
- s = df["A"]
- result = s[:, "two"]
- expected = df.xs("two", level=1)["A"]
- tm.assert_series_equal(result, expected)
-
-
-def test_xs_level_series_ymd(multiindex_year_month_day_dataframe_random_data):
- # this test is not explicitly testing .xs functionality
- # TODO: move to another module or refactor
- df = multiindex_year_month_day_dataframe_random_data
- s = df["A"]
- result = s[2000, 5]
- expected = df.loc[2000, 5]["A"]
- tm.assert_series_equal(result, expected)
-
-
-def test_xs_level_series_slice_not_implemented(
- multiindex_year_month_day_dataframe_random_data,
-):
- # this test is not explicitly testing .xs functionality
- # TODO: move to another module or refactor
- # not implementing this for now
- df = multiindex_year_month_day_dataframe_random_data
- s = df["A"]
-
- msg = r"\(2000, slice\(3, 4, None\)\)"
- with pytest.raises(TypeError, match=msg):
- s[2000, 3:4]
-
-
-def test_xs_IndexSlice_argument_not_implemented():
- # GH 35301
-
- index = MultiIndex(
- levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
- codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
- )
-
- series = Series(np.random.randn(6), index=index)
- frame = DataFrame(np.random.randn(6, 4), index=index)
-
- msg = (
- "Expected label or tuple of labels, got "
- r"\(\('foo', 'qux', 0\), slice\(None, None, None\)\)"
- )
- with pytest.raises(TypeError, match=msg):
- frame.xs(IndexSlice[("foo", "qux", 0), :])
- with pytest.raises(TypeError, match=msg):
- series.xs(IndexSlice[("foo", "qux", 0), :])
-
-
-def test_series_getitem_multiindex_xs():
- # GH6258
- dt = list(date_range("20130903", periods=3))
- idx = MultiIndex.from_product([list("AB"), dt])
- s = Series([1, 3, 4, 1, 3, 4], index=idx)
- expected = Series([1, 1], index=list("AB"))
-
- result = s.xs("20130903", level=1)
- tm.assert_series_equal(result, expected)
-
-
-def test_series_getitem_multiindex_xs_by_label():
- # GH5684
- idx = MultiIndex.from_tuples(
- [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
- )
- s = Series([1, 2, 3, 4], index=idx)
- return_value = s.index.set_names(["L1", "L2"], inplace=True)
- assert return_value is None
- expected = Series([1, 3], index=["a", "b"])
- return_value = expected.index.set_names(["L1"], inplace=True)
- assert return_value is None
-
- result = s.xs("one", level="L2")
- tm.assert_series_equal(result, expected)
-
-
-def test_xs_levels_raises():
- df = DataFrame({"A": [1, 2, 3]})
-
- msg = "Index must be a MultiIndex"
- with pytest.raises(TypeError, match=msg):
- df.xs(0, level="as")
-
- s = df.A
- with pytest.raises(TypeError, match=msg):
- s.xs(0, level="as")
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 0da1f74d9cde6..9f6aab823c3ad 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -278,3 +278,15 @@ def test_getitem_ndim_deprecated():
s = Series([0, 1])
with tm.assert_produces_warning(FutureWarning):
s[:, None]
+
+
+def test_getitem_multilevel_scalar_slice_not_implemented(
+ multiindex_year_month_day_dataframe_random_data,
+):
+ # not implementing this for now
+ df = multiindex_year_month_day_dataframe_random_data
+ ser = df["A"]
+
+ msg = r"\(2000, slice\(3, 4, None\)\)"
+ with pytest.raises(TypeError, match=msg):
+ ser[2000, 3:4]
diff --git a/pandas/tests/series/indexing/test_xs.py b/pandas/tests/series/indexing/test_xs.py
index 43458ca2ebeb2..1a23b09bde816 100644
--- a/pandas/tests/series/indexing/test_xs.py
+++ b/pandas/tests/series/indexing/test_xs.py
@@ -1,13 +1,14 @@
import numpy as np
-import pandas as pd
+from pandas import MultiIndex, Series, date_range
+import pandas._testing as tm
def test_xs_datetimelike_wrapping():
# GH#31630 a case where we shouldn't wrap datetime64 in Timestamp
- arr = pd.date_range("2016-01-01", periods=3)._data._data
+ arr = date_range("2016-01-01", periods=3)._data._data
- ser = pd.Series(arr, dtype=object)
+ ser = Series(arr, dtype=object)
for i in range(len(ser)):
ser.iloc[i] = arr[i]
assert ser.dtype == object
@@ -15,3 +16,37 @@ def test_xs_datetimelike_wrapping():
result = ser.xs(0)
assert isinstance(result, np.datetime64)
+
+
+class TestXSWithMultiIndex:
+ def test_xs_level_series(self, multiindex_dataframe_random_data):
+ df = multiindex_dataframe_random_data
+ ser = df["A"]
+ expected = ser[:, "two"]
+ result = df.xs("two", level=1)["A"]
+ tm.assert_series_equal(result, expected)
+
+ def test_series_getitem_multiindex_xs_by_label(self):
+ # GH#5684
+ idx = MultiIndex.from_tuples(
+ [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
+ )
+ ser = Series([1, 2, 3, 4], index=idx)
+ return_value = ser.index.set_names(["L1", "L2"], inplace=True)
+ assert return_value is None
+ expected = Series([1, 3], index=["a", "b"])
+ return_value = expected.index.set_names(["L1"], inplace=True)
+ assert return_value is None
+
+ result = ser.xs("one", level="L2")
+ tm.assert_series_equal(result, expected)
+
+ def test_series_getitem_multiindex_xs(xs):
+ # GH#6258
+ dt = list(date_range("20130903", periods=3))
+ idx = MultiIndex.from_product([list("AB"), dt])
+ ser = Series([1, 3, 4, 1, 3, 4], index=idx)
+ expected = Series([1, 1], index=list("AB"))
+
+ result = ser.xs("20130903", level=1)
+ tm.assert_series_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37456 | 2020-10-28T00:39:50Z | 2020-10-29T01:01:03Z | 2020-10-29T01:01:03Z | 2020-10-29T01:59:36Z | |
CI: Troubleshoot PY38 windows build | diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
index 5938ba1fd69f5..601a834d6306a 100644
--- a/ci/azure/windows.yml
+++ b/ci/azure/windows.yml
@@ -16,7 +16,7 @@ jobs:
py38_np18:
ENV_FILE: ci/deps/azure-windows-38.yaml
CONDA_PY: "38"
- PATTERN: "not slow and not network"
+ PATTERN: "not slow and not network and not high_memory"
steps:
- powershell: |
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index fda2005ce7843..9b553fbc81a03 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -22,6 +22,12 @@ fi
PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile -s --strict --durations=30 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas"
+if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then
+ # GH#37455 windows py38 build appears to be running out of memory
+ # skip collection of window tests
+ PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/window/"
+fi
+
echo $PYTEST_CMD
sh -c "$PYTEST_CMD"
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 34d56672e5536..b6dde90ce161f 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -2,6 +2,7 @@
import datetime
import inspect
import pydoc
+import warnings
import numpy as np
import pytest
@@ -561,9 +562,13 @@ def test_constructor_expanddim_lookup(self):
# raise NotImplementedError
df = DataFrame()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with warnings.catch_warnings(record=True) as wrn:
# _AXIS_NUMBERS, _AXIS_NAMES lookups
inspect.getmembers(df)
+ # some versions give FutureWarning, others DeprecationWarning
+ assert len(wrn)
+ assert any(x.category in [FutureWarning, DeprecationWarning] for x in wrn)
+
with pytest.raises(NotImplementedError, match="Not supported for DataFrames!"):
df._constructor_expanddim(np.arange(27).reshape(3, 3, 3))
| Some of the error messages look like a OOM, so this is just trying cutting down the number of tests collected/run by ~40% | https://api.github.com/repos/pandas-dev/pandas/pulls/37455 | 2020-10-27T23:07:06Z | 2020-10-31T14:19:49Z | 2020-10-31T14:19:49Z | 2020-10-31T15:20:28Z |
ENH: Improve numerical stability for Pearson corr() and cov() | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index f1f24ab7a101b..53c56fe3f6909 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -219,6 +219,7 @@ Other enhancements
- Where possible :meth:`RangeIndex.difference` and :meth:`RangeIndex.symmetric_difference` will return :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`36564`)
- Added :meth:`Rolling.sem()` and :meth:`Expanding.sem()` to compute the standard error of mean (:issue:`26476`).
- :meth:`Rolling.var()` and :meth:`Rolling.std()` use Kahan summation and Welfords Method to avoid numerical issues (:issue:`37051`)
+- :meth:`DataFrame.corr` and :meth:`DataFrame.cov` use Welfords Method to avoid numerical issues (:issue:`37448`)
- :meth:`DataFrame.plot` now recognizes ``xlabel`` and ``ylabel`` arguments for plots of type ``scatter`` and ``hexbin`` (:issue:`37001`)
- :class:`DataFrame` now supports ``divmod`` operation (:issue:`37165`)
- :meth:`DataFrame.to_parquet` now returns a ``bytes`` object when no ``path`` argument is passed (:issue:`37105`)
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index da5ae97bb067b..620889ad39889 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -268,7 +268,8 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
ndarray[float64_t, ndim=2] result
ndarray[uint8_t, ndim=2] mask
int64_t nobs = 0
- float64_t vx, vy, sumx, sumy, sumxx, sumyy, meanx, meany, divisor
+ float64_t vx, vy, meanx, meany, divisor, prev_meany, prev_meanx, ssqdmx
+ float64_t ssqdmy, covxy
N, K = (<object>mat).shape
@@ -283,37 +284,29 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
with nogil:
for xi in range(K):
for yi in range(xi + 1):
- nobs = sumxx = sumyy = sumx = sumy = 0
+ # Welford's method for the variance-calculation
+ # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ nobs = ssqdmx = ssqdmy = covxy = meanx = meany = 0
for i in range(N):
if mask[i, xi] and mask[i, yi]:
vx = mat[i, xi]
vy = mat[i, yi]
nobs += 1
- sumx += vx
- sumy += vy
+ prev_meanx = meanx
+ prev_meany = meany
+ meanx = meanx + 1 / nobs * (vx - meanx)
+ meany = meany + 1 / nobs * (vy - meany)
+ ssqdmx = ssqdmx + (vx - meanx) * (vx - prev_meanx)
+ ssqdmy = ssqdmy + (vy - meany) * (vy - prev_meany)
+ covxy = covxy + (vx - meanx) * (vy - prev_meany)
if nobs < minpv:
result[xi, yi] = result[yi, xi] = NaN
else:
- meanx = sumx / nobs
- meany = sumy / nobs
-
- # now the cov numerator
- sumx = 0
-
- for i in range(N):
- if mask[i, xi] and mask[i, yi]:
- vx = mat[i, xi] - meanx
- vy = mat[i, yi] - meany
-
- sumx += vx * vy
- sumxx += vx * vx
- sumyy += vy * vy
-
- divisor = (nobs - 1.0) if cov else sqrt(sumxx * sumyy)
+ divisor = (nobs - 1.0) if cov else sqrt(ssqdmx * ssqdmy)
if divisor != 0:
- result[xi, yi] = result[yi, xi] = sumx / divisor
+ result[xi, yi] = result[yi, xi] = covxy / divisor
else:
result[xi, yi] = result[yi, xi] = NaN
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 7eeeb245534f5..6cea5abcac6d0 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -208,6 +208,25 @@ def test_corr_item_cache(self):
assert df["A"] is ser
assert df.values[0, 0] == 99
+ @pytest.mark.parametrize("length", [2, 20, 200, 2000])
+ def test_corr_for_constant_columns(self, length):
+ # GH: 37448
+ df = DataFrame(length * [[0.4, 0.1]], columns=["A", "B"])
+ result = df.corr()
+ expected = DataFrame(
+ {"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"]
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_calc_corr_small_numbers(self):
+ # GH: 37452
+ df = DataFrame(
+ {"A": [1.0e-20, 2.0e-20, 3.0e-20], "B": [1.0e-20, 2.0e-20, 3.0e-20]}
+ )
+ result = df.corr()
+ expected = DataFrame({"A": [1.0, 1.0], "B": [1.0, 1.0]}, index=["A", "B"])
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameCorrWith:
def test_corrwith(self, datetime_frame):
| - [x] closes #37448
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Floating number issues when summing the same number often enough... | https://api.github.com/repos/pandas-dev/pandas/pulls/37453 | 2020-10-27T22:19:43Z | 2020-10-30T19:57:22Z | 2020-10-30T19:57:22Z | 2020-10-30T20:03:38Z |
CLN: Breakup agg | diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index 4ab0c2515f5fa..639b5f31835d1 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -528,133 +528,44 @@ def transform_str_or_callable(
return func(obj, *args, **kwargs)
-def aggregate(obj, arg: AggFuncType, *args, **kwargs):
+def aggregate(
+ obj,
+ arg: AggFuncType,
+ *args,
+ **kwargs,
+):
"""
- provide an implementation for the aggregators
+ Provide an implementation for the aggregators.
Parameters
----------
- arg : string, dict, function
- *args : args to pass on to the function
- **kwargs : kwargs to pass on to the function
+ obj : Pandas object to compute aggregation on.
+ arg : string, dict, function.
+ *args : args to pass on to the function.
+ **kwargs : kwargs to pass on to the function.
Returns
-------
- tuple of result, how
+ tuple of result, how.
Notes
-----
how can be a string describe the required post-processing, or
- None if not required
+ None if not required.
"""
- is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
-
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(obj, "axis", 0)
if isinstance(arg, str):
return obj._try_aggregate_string_function(arg, *args, **kwargs), None
-
- if isinstance(arg, dict):
- # aggregate based on the passed dict
- if _axis != 0: # pragma: no cover
- raise ValueError("Can only pass dict with axis=0")
-
- selected_obj = obj._selected_obj
-
- # if we have a dict of any non-scalars
- # eg. {'A' : ['mean']}, normalize all to
- # be list-likes
- if any(is_aggregator(x) for x in arg.values()):
- new_arg: Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] = {}
- for k, v in arg.items():
- if not isinstance(v, (tuple, list, dict)):
- new_arg[k] = [v]
- else:
- new_arg[k] = v
-
- # the keys must be in the columns
- # for ndim=2, or renamers for ndim=1
-
- # ok for now, but deprecated
- # {'A': { 'ra': 'mean' }}
- # {'A': { 'ra': ['mean'] }}
- # {'ra': ['mean']}
-
- # not ok
- # {'ra' : { 'A' : 'mean' }}
- if isinstance(v, dict):
- raise SpecificationError("nested renamer is not supported")
- elif isinstance(selected_obj, ABCSeries):
- raise SpecificationError("nested renamer is not supported")
- elif (
- isinstance(selected_obj, ABCDataFrame)
- and k not in selected_obj.columns
- ):
- raise KeyError(f"Column '{k}' does not exist!")
-
- arg = new_arg
-
- else:
- # deprecation of renaming keys
- # GH 15931
- keys = list(arg.keys())
- if isinstance(selected_obj, ABCDataFrame) and len(
- selected_obj.columns.intersection(keys)
- ) != len(keys):
- cols = sorted(set(keys) - set(selected_obj.columns.intersection(keys)))
- raise SpecificationError(f"Column(s) {cols} do not exist")
-
- from pandas.core.reshape.concat import concat
-
- if selected_obj.ndim == 1:
- # key only used for output
- colg = obj._gotitem(obj._selection, ndim=1)
- results = {key: colg.agg(how) for key, how in arg.items()}
- else:
- # key used for column selection and output
- results = {
- key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()
- }
-
- # set the final keys
- keys = list(arg.keys())
-
- # Avoid making two isinstance calls in all and any below
- is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
-
- # combine results
- if all(is_ndframe):
- keys_to_use = [k for k in keys if not results[k].empty]
- # Have to check, if at least one DataFrame is not empty.
- keys_to_use = keys_to_use if keys_to_use != [] else keys
- axis = 0 if isinstance(obj, ABCSeries) else 1
- result = concat({k: results[k] for k in keys_to_use}, axis=axis)
- elif any(is_ndframe):
- # There is a mix of NDFrames and scalars
- raise ValueError(
- "cannot perform both aggregation "
- "and transformation operations "
- "simultaneously"
- )
- else:
- from pandas import Series
-
- # we have a dict of scalars
- # GH 36212 use name only if obj is a series
- if obj.ndim == 1:
- obj = cast("Series", obj)
- name = obj.name
- else:
- name = None
-
- result = Series(results, name=name)
-
- return result, True
+ elif is_dict_like(arg):
+ arg = cast(Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], arg)
+ return agg_dict_like(obj, arg, _axis), True
elif is_list_like(arg):
# we require a list, but not an 'str'
- return aggregate_multiple_funcs(obj, arg, _axis=_axis), None
+ arg = cast(List[AggFuncTypeBase], arg)
+ return agg_list_like(obj, arg, _axis=_axis), None
else:
result = None
@@ -667,7 +578,26 @@ def aggregate(obj, arg: AggFuncType, *args, **kwargs):
return result, True
-def aggregate_multiple_funcs(obj, arg, _axis):
+def agg_list_like(
+ obj,
+ arg: List[AggFuncTypeBase],
+ _axis: int,
+) -> FrameOrSeriesUnion:
+ """
+ Compute aggregation in the case of a list-like argument.
+
+ Parameters
+ ----------
+ obj : Pandas object to compute aggregation on.
+ arg : list
+ Aggregations to compute.
+ _axis : int, 0 or 1
+ Axis to compute aggregation on.
+
+ Returns
+ -------
+ Result of aggregation.
+ """
from pandas.core.reshape.concat import concat
if _axis != 0:
@@ -738,3 +668,118 @@ def aggregate_multiple_funcs(obj, arg, _axis):
"cannot combine transform and aggregation operations"
) from err
return result
+
+
+def agg_dict_like(
+ obj,
+ arg: Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]],
+ _axis: int,
+) -> FrameOrSeriesUnion:
+ """
+ Compute aggregation in the case of a dict-like argument.
+
+ Parameters
+ ----------
+ obj : Pandas object to compute aggregation on.
+ arg : dict
+ label-aggregation pairs to compute.
+ _axis : int, 0 or 1
+ Axis to compute aggregation on.
+
+ Returns
+ -------
+ Result of aggregation.
+ """
+ is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
+
+ if _axis != 0: # pragma: no cover
+ raise ValueError("Can only pass dict with axis=0")
+
+ selected_obj = obj._selected_obj
+
+ # if we have a dict of any non-scalars
+ # eg. {'A' : ['mean']}, normalize all to
+ # be list-likes
+ if any(is_aggregator(x) for x in arg.values()):
+ new_arg: Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] = {}
+ for k, v in arg.items():
+ if not isinstance(v, (tuple, list, dict)):
+ new_arg[k] = [v]
+ else:
+ new_arg[k] = v
+
+ # the keys must be in the columns
+ # for ndim=2, or renamers for ndim=1
+
+ # ok for now, but deprecated
+ # {'A': { 'ra': 'mean' }}
+ # {'A': { 'ra': ['mean'] }}
+ # {'ra': ['mean']}
+
+ # not ok
+ # {'ra' : { 'A' : 'mean' }}
+ if isinstance(v, dict):
+ raise SpecificationError("nested renamer is not supported")
+ elif isinstance(selected_obj, ABCSeries):
+ raise SpecificationError("nested renamer is not supported")
+ elif (
+ isinstance(selected_obj, ABCDataFrame) and k not in selected_obj.columns
+ ):
+ raise KeyError(f"Column '{k}' does not exist!")
+
+ arg = new_arg
+
+ else:
+ # deprecation of renaming keys
+ # GH 15931
+ keys = list(arg.keys())
+ if isinstance(selected_obj, ABCDataFrame) and len(
+ selected_obj.columns.intersection(keys)
+ ) != len(keys):
+ cols = sorted(set(keys) - set(selected_obj.columns.intersection(keys)))
+ raise SpecificationError(f"Column(s) {cols} do not exist")
+
+ from pandas.core.reshape.concat import concat
+
+ if selected_obj.ndim == 1:
+ # key only used for output
+ colg = obj._gotitem(obj._selection, ndim=1)
+ results = {key: colg.agg(how) for key, how in arg.items()}
+ else:
+ # key used for column selection and output
+ results = {key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()}
+
+ # set the final keys
+ keys = list(arg.keys())
+
+ # Avoid making two isinstance calls in all and any below
+ is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
+
+ # combine results
+ if all(is_ndframe):
+ keys_to_use = [k for k in keys if not results[k].empty]
+ # Have to check, if at least one DataFrame is not empty.
+ keys_to_use = keys_to_use if keys_to_use != [] else keys
+ axis = 0 if isinstance(obj, ABCSeries) else 1
+ result = concat({k: results[k] for k in keys_to_use}, axis=axis)
+ elif any(is_ndframe):
+ # There is a mix of NDFrames and scalars
+ raise ValueError(
+ "cannot perform both aggregation "
+ "and transformation operations "
+ "simultaneously"
+ )
+ else:
+ from pandas import Series
+
+ # we have a dict of scalars
+ # GH 36212 use name only if obj is a series
+ if obj.ndim == 1:
+ obj = cast("Series", obj)
+ name = obj.name
+ else:
+ name = None
+
+ result = Series(results, name=name)
+
+ return result
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 9c78166ce0480..7f1c56d46fb90 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -54,8 +54,8 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core.aggregation import (
+ agg_list_like,
aggregate,
- aggregate_multiple_funcs,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
@@ -968,7 +968,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
# try to treat as if we are passing a list
try:
- result = aggregate_multiple_funcs(self, [func], _axis=self.axis)
+ result = agg_list_like(self, [func], _axis=self.axis)
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH 32040
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Extracted dictionary parts to `agg_dict_like`, and changed from `isinstance(arg, dict)` to `is_dict_like(arg)`. Also renamed `aggregate_multiple_funcs` to `agg_list_like`. | https://api.github.com/repos/pandas-dev/pandas/pulls/37452 | 2020-10-27T22:15:29Z | 2020-10-30T21:55:04Z | 2020-10-30T21:55:04Z | 2020-10-31T02:28:57Z |
TST/REF: collect tests by method from test_io | diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index afc271264edbf..890146f0789ae 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -23,13 +23,14 @@
from warnings import catch_warnings, simplefilter
import zipfile
+import numpy as np
import pytest
from pandas.compat import PY38, get_lzma_file, import_lzma, is_platform_little_endian
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import Index
+from pandas import Index, Series, period_range
import pandas._testing as tm
from pandas.tseries.offsets import Day, MonthEnd
@@ -499,7 +500,7 @@ def __init__(self):
def test_read_pickle_with_subclass():
# GH 12163
- expected = pd.Series(dtype=object), MyTz()
+ expected = Series(dtype=object), MyTz()
result = tm.round_trip_pickle(expected)
tm.assert_series_equal(result[0], expected[0])
@@ -548,3 +549,25 @@ def _test_roundtrip(frame):
_test_roundtrip(frame.T)
_test_roundtrip(ymd)
_test_roundtrip(ymd.T)
+
+
+def test_pickle_timeseries_periodindex():
+ # GH#2891
+ prng = period_range("1/1/2011", "1/1/2012", freq="M")
+ ts = Series(np.random.randn(len(prng)), prng)
+ new_ts = tm.round_trip_pickle(ts)
+ assert new_ts.index.freq == "M"
+
+
+@pytest.mark.parametrize(
+ "name", [777, 777.0, "name", datetime.datetime(2001, 11, 11), (1, 2)]
+)
+def test_pickle_preserve_name(name):
+ def _pickle_roundtrip_name(obj):
+ with tm.ensure_clean() as path:
+ obj.to_pickle(path)
+ unpickled = pd.read_pickle(path)
+ return unpickled
+
+ unpickled = _pickle_roundtrip_name(tm.makeTimeSeries(name=name))
+ assert unpickled.name == name
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/methods/test_to_csv.py
similarity index 75%
rename from pandas/tests/series/test_io.py
rename to pandas/tests/series/methods/test_to_csv.py
index b12ebd58e6a7b..a72e860340f25 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/methods/test_to_csv.py
@@ -5,7 +5,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series
+from pandas import Series
import pandas._testing as tm
from pandas.io.common import get_handle
@@ -180,62 +180,3 @@ def test_to_csv_interval_index(self):
expected.index = expected.index.astype(str)
tm.assert_series_equal(result, expected)
-
-
-class TestSeriesIO:
- def test_to_frame(self, datetime_series):
- datetime_series.name = None
- rs = datetime_series.to_frame()
- xp = pd.DataFrame(datetime_series.values, index=datetime_series.index)
- tm.assert_frame_equal(rs, xp)
-
- datetime_series.name = "testname"
- rs = datetime_series.to_frame()
- xp = pd.DataFrame(
- dict(testname=datetime_series.values), index=datetime_series.index
- )
- tm.assert_frame_equal(rs, xp)
-
- rs = datetime_series.to_frame(name="testdifferent")
- xp = pd.DataFrame(
- dict(testdifferent=datetime_series.values), index=datetime_series.index
- )
- tm.assert_frame_equal(rs, xp)
-
- def test_timeseries_periodindex(self):
- # GH2891
- from pandas import period_range
-
- prng = period_range("1/1/2011", "1/1/2012", freq="M")
- ts = Series(np.random.randn(len(prng)), prng)
- new_ts = tm.round_trip_pickle(ts)
- assert new_ts.index.freq == "M"
-
- def test_pickle_preserve_name(self):
- for n in [777, 777.0, "name", datetime(2001, 11, 11), (1, 2)]:
- unpickled = self._pickle_roundtrip_name(tm.makeTimeSeries(name=n))
- assert unpickled.name == n
-
- def _pickle_roundtrip_name(self, obj):
-
- with tm.ensure_clean() as path:
- obj.to_pickle(path)
- unpickled = pd.read_pickle(path)
- return unpickled
-
- def test_to_frame_expanddim(self):
- # GH 9762
-
- class SubclassedSeries(Series):
- @property
- def _constructor_expanddim(self):
- return SubclassedFrame
-
- class SubclassedFrame(DataFrame):
- pass
-
- s = SubclassedSeries([1, 2, 3], name="X")
- result = s.to_frame()
- assert isinstance(result, SubclassedFrame)
- expected = SubclassedFrame({"X": [1, 2, 3]})
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_to_frame.py b/pandas/tests/series/methods/test_to_frame.py
new file mode 100644
index 0000000000000..b324fab5d97d4
--- /dev/null
+++ b/pandas/tests/series/methods/test_to_frame.py
@@ -0,0 +1,40 @@
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+
+class TestToFrame:
+ def test_to_frame(self, datetime_series):
+ datetime_series.name = None
+ rs = datetime_series.to_frame()
+ xp = DataFrame(datetime_series.values, index=datetime_series.index)
+ tm.assert_frame_equal(rs, xp)
+
+ datetime_series.name = "testname"
+ rs = datetime_series.to_frame()
+ xp = DataFrame(
+ dict(testname=datetime_series.values), index=datetime_series.index
+ )
+ tm.assert_frame_equal(rs, xp)
+
+ rs = datetime_series.to_frame(name="testdifferent")
+ xp = DataFrame(
+ dict(testdifferent=datetime_series.values), index=datetime_series.index
+ )
+ tm.assert_frame_equal(rs, xp)
+
+ def test_to_frame_expanddim(self):
+ # GH#9762
+
+ class SubclassedSeries(Series):
+ @property
+ def _constructor_expanddim(self):
+ return SubclassedFrame
+
+ class SubclassedFrame(DataFrame):
+ pass
+
+ ser = SubclassedSeries([1, 2, 3], name="X")
+ result = ser.to_frame()
+ assert isinstance(result, SubclassedFrame)
+ expected = SubclassedFrame({"X": [1, 2, 3]})
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37451 | 2020-10-27T18:44:32Z | 2020-10-29T01:00:05Z | 2020-10-29T01:00:05Z | 2020-10-29T02:02:20Z |
DEPS: Update cython | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index 46640505a4c84..b1ea2682b7ea7 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -42,7 +42,7 @@
// followed by the pip installed packages).
"matrix": {
"numpy": [],
- "Cython": ["0.29.30"],
+ "Cython": ["0.29.32"],
"matplotlib": [],
"sqlalchemy": [],
"scipy": [],
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 65918005ad6f1..6a2f8b4682043 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.10
# test dependencies
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index f89d4a743a6f1..9c38f81de3f96 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index a57c7279e2e9b..365be2d54d24d 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -7,7 +7,7 @@ dependencies:
- python=3.8.0
# test dependencies
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index a4473f5911903..5b55bf7454030 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 8605a9f4520d7..46705ca428705 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.9
# test dependencies
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml
index 1a3c73cb4ae2f..e06b992acc191 100644
--- a/ci/deps/actions-pypy-38.yaml
+++ b/ci/deps/actions-pypy-38.yaml
@@ -8,7 +8,7 @@ dependencies:
- python=3.8[build=*_pypy] # TODO: use this once pypy3.8 is available
# tools
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-asyncio
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index e76b3071bd8bb..1c614729331e2 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython>=0.29.30
+ - cython>=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index a49772fb83ca7..8fc36cb0c8543 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -14,6 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`)
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
- Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`)
- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
@@ -36,7 +37,7 @@ Bug fixes
Other
~~~~~
--
+- The minimum version of Cython needed to compile pandas is now ``0.29.32`` (:issue:`47978`)
-
.. ---------------------------------------------------------------------------
diff --git a/environment.yml b/environment.yml
index ec2e0a3860432..7b4c537c0bcd9 100644
--- a/environment.yml
+++ b/environment.yml
@@ -6,7 +6,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython=0.29.30
+ - cython=0.29.32
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 24d7365b52159..2849d266db40e 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1227,6 +1227,13 @@ def test_iloc_setitem_nullable_2d_values(self):
df.iloc[:] = df.iloc[:, :]
tm.assert_frame_equal(df, orig)
+ def test_getitem_segfault_with_empty_like_object(self):
+ # GH#46848
+ df = DataFrame(np.empty((1, 1), dtype=object))
+ df[0] = np.empty_like(df[0])
+ # this produces the segfault
+ df[[0]]
+
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
diff --git a/pyproject.toml b/pyproject.toml
index 6ca37581b03f0..67c56123a847c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@
requires = [
"setuptools>=51.0.0",
"wheel",
- "Cython>=0.29.24,<3", # Note: sync with setup.py, environment.yml and asv.conf.json
+ "Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json
"oldest-supported-numpy>=0.10"
]
# uncomment to enable pep517 after versioneer problem is fixed.
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5a9647456cb0b..7c7271bb2d8b7 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,7 +1,7 @@
# This file is auto-generated from environment.yml, do not modify.
# See that file for comments about the need/usage of each dependency.
-cython==0.29.30
+cython==0.29.32
pytest>=6.0
pytest-cov
pytest-xdist>=1.31
diff --git a/setup.py b/setup.py
index 70adbd3c083af..12e8aa36c3794 100755
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,7 @@ def is_platform_mac():
# note: sync with pyproject.toml, environment.yml and asv.conf.json
-min_cython_ver = "0.29.30"
+min_cython_ver = "0.29.32"
try:
from Cython import (
| - [x] xref #47978
- [x] xref #46848 | https://api.github.com/repos/pandas-dev/pandas/pulls/47979 | 2022-08-05T13:10:09Z | 2022-08-08T19:00:00Z | 2022-08-08T19:00:00Z | 2022-08-08T21:29:07Z |
REF: Move Series logic from Index._get_values_for_loc | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6620cc1786340..4b47d71c453c8 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -140,6 +140,7 @@
ensure_index,
)
import pandas.core.indexes.base as ibase
+from pandas.core.indexes.multi import maybe_droplevels
from pandas.core.indexing import (
check_bool_indexer,
check_deprecated_indexers,
@@ -1062,7 +1063,24 @@ def _get_value(self, label, takeable: bool = False):
# Similar to Index.get_value, but we do not fall back to positional
loc = self.index.get_loc(label)
- return self.index._get_values_for_loc(self, loc, label)
+
+ if is_integer(loc):
+ return self._values[loc]
+
+ if isinstance(self.index, MultiIndex):
+ mi = self.index
+ new_values = self._values[loc]
+ if len(new_values) == 1 and mi.nlevels == 1:
+ # If more than one level left, we can not return a scalar
+ return new_values[0]
+
+ new_index = mi[loc]
+ new_index = maybe_droplevels(new_index, label)
+ new_ser = self._constructor(new_values, index=new_index, name=self.name)
+ return new_ser.__finalize__(self)
+
+ else:
+ return self.iloc[loc]
def __setitem__(self, key, value) -> None:
check_deprecated_indexers(key)
| Cleaner abstraction. | https://api.github.com/repos/pandas-dev/pandas/pulls/47975 | 2022-08-04T23:01:27Z | 2022-10-24T21:30:49Z | 2022-10-24T21:30:49Z | 2022-10-24T21:34:23Z |
TST/CLN: Consolidate creation of groupby method args | diff --git a/pandas/tests/groupby/__init__.py b/pandas/tests/groupby/__init__.py
index e69de29bb2d1d..c63aa568a15dc 100644
--- a/pandas/tests/groupby/__init__.py
+++ b/pandas/tests/groupby/__init__.py
@@ -0,0 +1,27 @@
+def get_groupby_method_args(name, obj):
+ """
+ Get required arguments for a groupby method.
+
+ When parametrizing a test over groupby methods (e.g. "sum", "mean", "fillna"),
+ it is often the case that arguments are required for certain methods.
+
+ Parameters
+ ----------
+ name: str
+ Name of the method.
+ obj: Series or DataFrame
+ pandas object that is being grouped.
+
+ Returns
+ -------
+ A tuple of required arguments for the method.
+ """
+ if name in ("nth", "fillna", "take"):
+ return (0,)
+ if name == "quantile":
+ return (0.5,)
+ if name == "corrwith":
+ return (obj,)
+ if name == "tshift":
+ return (0, 0)
+ return ()
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 4cfc3ea41543b..b064c12f89c21 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -17,6 +17,7 @@
)
import pandas._testing as tm
from pandas.core.api import Int64Index
+from pandas.tests.groupby import get_groupby_method_args
def test_apply_issues():
@@ -1069,7 +1070,7 @@ def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func):
# Check output when another method is called before .apply()
grp = df.groupby(by="a")
- args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
+ args = get_groupby_method_args(reduction_func, df)
with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
_ = getattr(grp, reduction_func)(*args)
result = grp.apply(sum)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 004e55f4d161f..6d22c676a3c16 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -14,6 +14,7 @@
qcut,
)
import pandas._testing as tm
+from pandas.tests.groupby import get_groupby_method_args
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
@@ -1373,7 +1374,7 @@ def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, r
"value": [0.1] * 4,
}
)
- args = {"nth": [0]}.get(reduction_func, [])
+ args = get_groupby_method_args(reduction_func, df)
expected_length = 4 if observed else 16
@@ -1409,7 +1410,7 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
}
)
unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
- args = {"nth": [0]}.get(reduction_func, [])
+ args = get_groupby_method_args(reduction_func, df)
series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
agg = getattr(series_groupby, reduction_func)
@@ -1450,7 +1451,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_fun
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
- args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
+ args = get_groupby_method_args(reduction_func, df)
with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
res = getattr(df_grp, reduction_func)(*args)
@@ -1482,7 +1483,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
- args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
+ args = get_groupby_method_args(reduction_func, df)
with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
res = getattr(df_grp, reduction_func)(*args)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index dda583e3a1962..93e9b5bb776ab 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -18,6 +18,7 @@
)
import pandas._testing as tm
import pandas.core.nanops as nanops
+from pandas.tests.groupby import get_groupby_method_args
from pandas.util import _test_decorators as td
@@ -570,7 +571,7 @@ def test_axis1_numeric_only(request, groupby_func, numeric_only):
groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]
gb = df.groupby(groups)
method = getattr(gb, groupby_func)
- args = (0,) if groupby_func == "fillna" else ()
+ args = get_groupby_method_args(groupby_func, df)
kwargs = {"axis": 1}
if numeric_only is not None:
# when numeric_only is None we don't pass any argument
@@ -1366,12 +1367,7 @@ def test_deprecate_numeric_only(
# has_arg: Whether the op has a numeric_only arg
df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]})
- if kernel == "corrwith":
- args = (df,)
- elif kernel == "nth" or kernel == "fillna":
- args = (0,)
- else:
- args = ()
+ args = get_groupby_method_args(kernel, df)
kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only}
gb = df.groupby(keys)
@@ -1451,22 +1447,7 @@ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
expected_gb = expected_ser.groupby(grouper)
expected_method = getattr(expected_gb, groupby_func)
- if groupby_func == "corrwith":
- args = (ser,)
- elif groupby_func == "corr":
- args = (ser,)
- elif groupby_func == "cov":
- args = (ser,)
- elif groupby_func == "nth":
- args = (0,)
- elif groupby_func == "fillna":
- args = (True,)
- elif groupby_func == "take":
- args = ([0],)
- elif groupby_func == "quantile":
- args = (0.5,)
- else:
- args = ()
+ args = get_groupby_method_args(groupby_func, ser)
fails_on_numeric_object = (
"corr",
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 73aeb17d8c274..a6ab13270c4dc 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -29,6 +29,7 @@
from pandas.core.arrays import BooleanArray
import pandas.core.common as com
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
+from pandas.tests.groupby import get_groupby_method_args
def test_repr():
@@ -2366,14 +2367,10 @@ def test_dup_labels_output_shape(groupby_func, idx):
df = DataFrame([[1, 1]], columns=idx)
grp_by = df.groupby([0])
- args = []
- if groupby_func in {"fillna", "nth"}:
- args.append(0)
- elif groupby_func == "corrwith":
- args.append(df)
- elif groupby_func == "tshift":
+ if groupby_func == "tshift":
df.index = [Timestamp("today")]
- args.extend([1, "D"])
+ # args.extend([1, "D"])
+ args = get_groupby_method_args(groupby_func, df)
with tm.assert_produces_warning(warn, match="is deprecated"):
result = getattr(grp_by, groupby_func)(*args)
diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py
index b665843728165..fddf0c86d0ab1 100644
--- a/pandas/tests/groupby/test_groupby_subclass.py
+++ b/pandas/tests/groupby/test_groupby_subclass.py
@@ -10,6 +10,7 @@
)
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
+from pandas.tests.groupby import get_groupby_method_args
@pytest.mark.parametrize(
@@ -34,13 +35,7 @@ def test_groupby_preserves_subclass(obj, groupby_func):
# Groups should preserve subclass type
assert isinstance(grouped.get_group(0), type(obj))
- args = []
- if groupby_func in {"fillna", "nth"}:
- args.append(0)
- elif groupby_func == "corrwith":
- args.append(obj)
- elif groupby_func == "tshift":
- args.extend([0, 0])
+ args = get_groupby_method_args(groupby_func, obj)
with tm.assert_produces_warning(warn, match="is deprecated"):
result1 = getattr(grouped, groupby_func)(*args)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 5c64ba3d9e266..d2928c52c33e2 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -22,6 +22,7 @@
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.core.groupby.generic import DataFrameGroupBy
+from pandas.tests.groupby import get_groupby_method_args
def assert_fp_equal(a, b):
@@ -172,14 +173,10 @@ def test_transform_axis_1(request, transformation_func):
msg = "ngroup fails with axis=1: #45986"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- warn = None
- if transformation_func == "tshift":
- warn = FutureWarning
-
- request.node.add_marker(pytest.mark.xfail(reason="tshift is deprecated"))
- args = ("ffill",) if transformation_func == "fillna" else ()
+ warn = FutureWarning if transformation_func == "tshift" else None
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
+ args = get_groupby_method_args(transformation_func, df)
with tm.assert_produces_warning(warn):
result = df.groupby([0, 0, 1], axis=1).transform(transformation_func, *args)
expected = df.T.groupby([0, 0, 1]).transform(transformation_func, *args).T
@@ -1168,7 +1165,7 @@ def test_transform_agg_by_name(request, reduction_func, obj):
pytest.mark.xfail(reason="TODO: implement SeriesGroupBy.corrwith")
)
- args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, [])
+ args = get_groupby_method_args(reduction_func, obj)
with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
result = g.transform(func, *args)
@@ -1370,12 +1367,7 @@ def test_null_group_str_reducer(request, dropna, reduction_func):
df = DataFrame({"A": [1, 1, np.nan, np.nan], "B": [1, 2, 2, 3]}, index=index)
gb = df.groupby("A", dropna=dropna)
- if reduction_func == "corrwith":
- args = (df["B"],)
- elif reduction_func == "nth":
- args = (0,)
- else:
- args = ()
+ args = get_groupby_method_args(reduction_func, df)
# Manually handle reducers that don't fit the generic pattern
# Set expected with dropna=False, then replace if necessary
@@ -1418,8 +1410,8 @@ def test_null_group_str_transformer(request, dropna, transformation_func):
if transformation_func == "tshift":
msg = "tshift requires timeseries"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- args = (0,) if transformation_func == "fillna" else ()
df = DataFrame({"A": [1, 1, np.nan], "B": [1, 2, 2]}, index=[1, 2, 3])
+ args = get_groupby_method_args(transformation_func, df)
gb = df.groupby("A", dropna=dropna)
buffer = []
@@ -1461,12 +1453,7 @@ def test_null_group_str_reducer_series(request, dropna, reduction_func):
ser = Series([1, 2, 2, 3], index=index)
gb = ser.groupby([1, 1, np.nan, np.nan], dropna=dropna)
- if reduction_func == "corrwith":
- args = (ser,)
- elif reduction_func == "nth":
- args = (0,)
- else:
- args = ()
+ args = get_groupby_method_args(reduction_func, ser)
# Manually handle reducers that don't fit the generic pattern
# Set expected with dropna=False, then replace if necessary
@@ -1506,8 +1493,8 @@ def test_null_group_str_transformer_series(request, dropna, transformation_func)
if transformation_func == "tshift":
msg = "tshift requires timeseries"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- args = (0,) if transformation_func == "fillna" else ()
ser = Series([1, 2, 2], index=[1, 2, 3])
+ args = get_groupby_method_args(transformation_func, ser)
gb = ser.groupby([1, 1, np.nan], dropna=dropna)
buffer = []
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Wasn't sure if it was better to put the helper in `__init__` or create `common`. | https://api.github.com/repos/pandas-dev/pandas/pulls/47973 | 2022-08-04T21:23:17Z | 2022-08-08T21:57:19Z | 2022-08-08T21:57:19Z | 2022-08-09T21:12:27Z |
BUG: pivot_table raising TypeError with ea dtype and dropna True | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index bdf811f6a8f6a..c54730a78ad36 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -1027,6 +1027,7 @@ Reshaping
- Bug in :func:`concat` losing dtype of columns when ``join="outer"`` and ``sort=True`` (:issue:`47329`)
- Bug in :func:`concat` not sorting the column names when ``None`` is included (:issue:`47331`)
- Bug in :func:`concat` with identical key leads to error when indexing :class:`MultiIndex` (:issue:`46519`)
+- Bug in :func:`pivot_table` raising ``TypeError`` when ``dropna=True`` and aggregation column has extension array dtype (:issue:`47477`)
- Bug in :meth:`DataFrame.join` with a list when using suffixes to join DataFrames with duplicate column names (:issue:`46396`)
- Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`)
- Bug in :meth:`concat` when ``axis=1`` and ``sort=False`` where the resulting Index was a :class:`Int64Index` instead of a :class:`RangeIndex` (:issue:`46675`)
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 5226c928c6f73..867835ef7f0a3 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -178,7 +178,9 @@ def __internal_pivot_table(
and v in agged
and not is_integer_dtype(agged[v])
):
- if not isinstance(agged[v], ABCDataFrame):
+ if not isinstance(agged[v], ABCDataFrame) and isinstance(
+ data[v].dtype, np.dtype
+ ):
# exclude DataFrame case bc maybe_downcast_to_dtype expects
# ArrayLike
# e.g. test_pivot_table_multiindex_columns_doctest_case
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 8312e3b9de9a7..e2eed4358aaaa 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -2223,6 +2223,21 @@ def test_pivot_table_with_margins_and_numeric_columns(self):
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("dropna", [True, False])
+ def test_pivot_ea_dtype_dropna(self, dropna):
+ # GH#47477
+ df = DataFrame({"x": "a", "y": "b", "age": Series([20, 40], dtype="Int64")})
+ result = df.pivot_table(
+ index="x", columns="y", values="age", aggfunc="mean", dropna=dropna
+ )
+ expected = DataFrame(
+ [[30]],
+ index=Index(["a"], name="x"),
+ columns=Index(["b"], name="y"),
+ dtype="Float64",
+ )
+ tm.assert_frame_equal(result, expected)
+
class TestPivot:
def test_pivot(self):
| - [x] closes #47477 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
The aggregation function already provides the correct dtype. In case of numpy dtype we are casting to many cases, see #47971 | https://api.github.com/repos/pandas-dev/pandas/pulls/47972 | 2022-08-04T21:21:29Z | 2022-08-08T22:12:22Z | 2022-08-08T22:12:22Z | 2022-08-09T06:38:56Z |
REF: Use `Styler` implementation for `DataFrame.to_latex` | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index cb51365fa27cd..e3a4c2bc0daef 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -452,6 +452,37 @@ Now, the axes return an empty :class:`RangeIndex`.
pd.Series().index
pd.DataFrame().axes
+.. _whatsnew_200.api_breaking.to_latex:
+
+DataFrame to LaTeX has a new render engine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The existing :meth:`DataFrame.to_latex` has been restructured to utilise the
+extended implementation previously available under :meth:`.Styler.to_latex`.
+The arguments signature is similar, albeit ``col_space`` has been removed since
+it is ignored by LaTeX engines. This render engine also requires ``jinja2`` as a
+dependency which needs to be installed, since rendering is based upon jinja2 templates.
+
+The pandas options below are no longer used and will be removed in future releases.
+The alternative options giving similar functionality are indicated below:
+
+- ``display.latex.escape``: replaced with ``styler.format.escape``,
+- ``display.latex.longtable``: replaced with ``styler.latex.environment``,
+- ``display.latex.multicolumn``, ``display.latex.multicolumn_format`` and
+ ``display.latex.multirow``: replaced with ``styler.sparse.rows``,
+ ``styler.sparse.columns``, ``styler.latex.multirow_align`` and
+ ``styler.latex.multicol_align``,
+- ``display.latex.repr``: replaced with ``styler.render.repr``,
+- ``display.max_rows`` and ``display.max_columns``: replace with
+ ``styler.render.max_rows``, ``styler.render.max_columns`` and
+ ``styler.render.max_elements``.
+
+Note that the behaviour of ``_repr_latex_`` is also changed. Previously
+setting ``display.latex.repr`` would generate LaTeX only when using nbconvert for a
+JupyterNotebook, and not when the user is running the notebook. Now the
+``styler.render.repr`` option allows control of the specific output
+within JupyterNotebooks for operations (not just on nbconvert). See :issue:`39911`.
+
.. _whatsnew_200.api_breaking.deps:
Increased minimum versions for dependencies
@@ -617,6 +648,7 @@ Removal of prior version deprecations/changes
- Removed deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` (:issue:`49397`)
- Removed deprecated :meth:`.Styler.where` (:issue:`49397`)
- Removed deprecated :meth:`.Styler.render` (:issue:`49397`)
+- Removed deprecated argument ``col_space`` in :meth:`DataFrame.to_latex` (:issue:`47970`)
- Removed deprecated argument ``null_color`` in :meth:`.Styler.highlight_null` (:issue:`49397`)
- Removed deprecated argument ``check_less_precise`` in :meth:`.testing.assert_frame_equal`, :meth:`.testing.assert_extension_array_equal`, :meth:`.testing.assert_series_equal`, :meth:`.testing.assert_index_equal` (:issue:`30562`)
- Removed deprecated ``null_counts`` argument in :meth:`DataFrame.info`. Use ``show_counts`` instead (:issue:`37999`)
@@ -791,6 +823,7 @@ Removal of prior version deprecations/changes
- Changed behavior of comparison of ``NaT`` with a ``datetime.date`` object; these now raise on inequality comparisons (:issue:`39196`)
- Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`)
- Changed behavior of :meth:`DataFrame.apply` with list-like so that any partial failure will raise an error (:issue:`43740`)
+- Changed behaviour of :meth:`DataFrame.to_latex` to now use the Styler implementation via :meth:`.Styler.to_latex` (:issue:`47970`)
- Changed behavior of :meth:`Series.__setitem__` with an integer key and a :class:`Float64Index` when the key is not present in the index; previously we treated the key as positional (behaving like ``series.iloc[key] = val``), now we treat it is a label (behaving like ``series.loc[key] = val``), consistent with :meth:`Series.__getitem__`` behavior (:issue:`33469`)
- Removed ``na_sentinel`` argument from :func:`factorize`, :meth:`.Index.factorize`, and :meth:`.ExtensionArray.factorize` (:issue:`47157`)
- Changed behavior of :meth:`Series.diff` and :meth:`DataFrame.diff` with :class:`ExtensionDtype` dtypes whose arrays do not implement ``diff``, these now raise ``TypeError`` rather than casting to numpy (:issue:`31025`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 700601cb4f024..6ebba70839dd9 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3,6 +3,7 @@
import collections
import datetime as dt
+from functools import partial
import gc
from json import loads
import operator
@@ -48,7 +49,6 @@
ArrayLike,
Axis,
AxisInt,
- ColspaceArgType,
CompressionOptions,
Dtype,
DtypeArg,
@@ -182,7 +182,6 @@
Window,
)
-from pandas.io.formats import format as fmt
from pandas.io.formats.format import (
DataFrameFormatter,
DataFrameRenderer,
@@ -2108,7 +2107,7 @@ def _repr_latex_(self):
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
- if config.get_option("display.latex.repr"):
+ if config.get_option("styler.render.repr") == "latex":
return self.to_latex()
else:
return None
@@ -3146,7 +3145,6 @@ def to_latex(
self,
buf: None = ...,
columns: Sequence[Hashable] | None = ...,
- col_space: ColspaceArgType | None = ...,
header: bool_t | Sequence[str] = ...,
index: bool_t = ...,
na_rep: str = ...,
@@ -3174,7 +3172,6 @@ def to_latex(
self,
buf: FilePath | WriteBuffer[str],
columns: Sequence[Hashable] | None = ...,
- col_space: ColspaceArgType | None = ...,
header: bool_t | Sequence[str] = ...,
index: bool_t = ...,
na_rep: str = ...,
@@ -3198,12 +3195,10 @@ def to_latex(
...
@final
- @doc(returns=fmt.return_docstring)
def to_latex(
self,
buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[Hashable] | None = None,
- col_space: ColspaceArgType | None = None,
header: bool_t | Sequence[str] = True,
index: bool_t = True,
na_rep: str = "NaN",
@@ -3237,14 +3232,15 @@ def to_latex(
.. versionchanged:: 1.2.0
Added position argument, changed meaning of caption argument.
+ .. versionchanged:: 2.0.0
+ Refactored to use the Styler implementation via jinja2 templating.
+
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
- col_space : int, optional
- The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
@@ -3318,7 +3314,12 @@ def to_latex(
``\begin{{}}`` in the output.
.. versionadded:: 1.2.0
- {returns}
+
+ Returns
+ -------
+ str or None
+ If buf is None, returns the result as a string. Otherwise returns None.
+
See Also
--------
io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX
@@ -3327,30 +3328,35 @@ def to_latex(
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
+ Notes
+ -----
+ As of v2.0.0 this method has changed to use the Styler implementation as
+ part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means
+ that ``jinja2`` is a requirement, and needs to be installed, for this method
+ to function. It is advised that users switch to using Styler, since that
+ implementation is more frequently updated and contains much more
+ flexibility with the output.
+
Examples
--------
+ Convert a general DataFrame to LaTeX with formatting:
+
>>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
- ... mask=['red', 'purple'],
- ... weapon=['sai', 'bo staff']))
- >>> print(df.to_latex(index=False)) # doctest: +SKIP
- \begin{{tabular}}{{lll}}
- \toprule
- name & mask & weapon \\
- \midrule
- Raphael & red & sai \\
- Donatello & purple & bo staff \\
+ ... age=[26, 45],
+ ... height=[181.23, 177.65]))
+ >>> print(df.to_latex(index=False,
+ ... formatters={"name": str.upper},
+ ... float_format="{:.1f}".format,
+ ... ) # doctest: +SKIP
+ \begin{tabular}{lrr}
+ \toprule
+ name & age & height \\
+ \midrule
+ RAPHAEL & 26 & 181.2 \\
+ DONATELLO & 45 & 177.7 \\
\bottomrule
- \end{{tabular}}
- """
- msg = (
- "In future versions `DataFrame.to_latex` is expected to utilise the base "
- "implementation of `Styler.to_latex` for formatting and rendering. "
- "The arguments signature may therefore change. It is recommended instead "
- "to use `DataFrame.style.to_latex` which also contains additional "
- "functionality."
- )
- warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
-
+ \end{tabular}
+ """
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
@@ -3365,35 +3371,170 @@ def to_latex(
if multirow is None:
multirow = config.get_option("display.latex.multirow")
- self = cast("DataFrame", self)
- formatter = DataFrameFormatter(
- self,
- columns=columns,
- col_space=col_space,
- na_rep=na_rep,
- header=header,
- index=index,
- formatters=formatters,
- float_format=float_format,
- bold_rows=bold_rows,
- sparsify=sparsify,
- index_names=index_names,
- escape=escape,
- decimal=decimal,
- )
- return DataFrameRenderer(formatter).to_latex(
- buf=buf,
- column_format=column_format,
- longtable=longtable,
- encoding=encoding,
- multicolumn=multicolumn,
- multicolumn_format=multicolumn_format,
- multirow=multirow,
- caption=caption,
- label=label,
- position=position,
+ if column_format is not None and not isinstance(column_format, str):
+ raise ValueError("`column_format` must be str or unicode")
+ length = len(self.columns) if columns is None else len(columns)
+ if isinstance(header, (list, tuple)) and len(header) != length:
+ raise ValueError(f"Writing {length} cols but got {len(header)} aliases")
+
+ # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure
+ base_format_ = {
+ "na_rep": na_rep,
+ "escape": "latex" if escape else None,
+ "decimal": decimal,
+ }
+ index_format_: dict[str, Any] = {"axis": 0, **base_format_}
+ column_format_: dict[str, Any] = {"axis": 1, **base_format_}
+
+ if isinstance(float_format, str):
+ float_format_: Callable | None = lambda x: float_format % x
+ else:
+ float_format_ = float_format
+
+ def _wrap(x, alt_format_):
+ if isinstance(x, (float, complex)) and float_format_ is not None:
+ return float_format_(x)
+ else:
+ return alt_format_(x)
+
+ formatters_: list | tuple | dict | Callable | None = None
+ if isinstance(formatters, list):
+ formatters_ = {
+ c: partial(_wrap, alt_format_=formatters[i])
+ for i, c in enumerate(self.columns)
+ }
+ elif isinstance(formatters, dict):
+ index_formatter = formatters.pop("__index__", None)
+ column_formatter = formatters.pop("__columns__", None)
+ if index_formatter is not None:
+ index_format_.update({"formatter": index_formatter})
+ if column_formatter is not None:
+ column_format_.update({"formatter": column_formatter})
+
+ formatters_ = formatters
+ float_columns = self.select_dtypes(include="float").columns
+ for col in float_columns:
+ if col not in formatters.keys():
+ formatters_.update({col: float_format_})
+ elif formatters is None and float_format is not None:
+ formatters_ = partial(_wrap, alt_format_=lambda v: v)
+ format_index_ = [index_format_, column_format_]
+
+ # Deal with hiding indexes and relabelling column names
+ hide_: list[dict] = []
+ relabel_index_: list[dict] = []
+ if columns:
+ hide_.append(
+ {
+ "subset": [c for c in self.columns if c not in columns],
+ "axis": "columns",
+ }
+ )
+ if header is False:
+ hide_.append({"axis": "columns"})
+ elif isinstance(header, (list, tuple)):
+ relabel_index_.append({"labels": header, "axis": "columns"})
+ format_index_ = [index_format_] # column_format is overwritten
+
+ if index is False:
+ hide_.append({"axis": "index"})
+ if index_names is False:
+ hide_.append({"names": True, "axis": "index"})
+
+ render_kwargs_ = {
+ "hrules": True,
+ "sparse_index": sparsify,
+ "sparse_columns": sparsify,
+ "environment": "longtable" if longtable else None,
+ "multicol_align": multicolumn_format
+ if multicolumn
+ else f"naive-{multicolumn_format}",
+ "multirow_align": "t" if multirow else "naive",
+ "encoding": encoding,
+ "caption": caption,
+ "label": label,
+ "position": position,
+ "column_format": column_format,
+ "clines": "skip-last;data" if multirow else None,
+ "bold_rows": bold_rows,
+ }
+
+ return self._to_latex_via_styler(
+ buf,
+ hide=hide_,
+ relabel_index=relabel_index_,
+ format={"formatter": formatters_, **base_format_},
+ format_index=format_index_,
+ render_kwargs=render_kwargs_,
)
+ def _to_latex_via_styler(
+ self,
+ buf=None,
+ *,
+ hide: dict | list[dict] | None = None,
+ relabel_index: dict | list[dict] | None = None,
+ format: dict | list[dict] | None = None,
+ format_index: dict | list[dict] | None = None,
+ render_kwargs: dict | None = None,
+ ):
+ """
+ Render object to a LaTeX tabular, longtable, or nested table.
+
+ Uses the ``Styler`` implementation with the following, ordered, method chaining:
+
+ .. code-block:: python
+ styler = Styler(DataFrame)
+ styler.hide(**hide)
+ styler.relabel_index(**relabel_index)
+ styler.format(**format)
+ styler.format_index(**format_index)
+ styler.to_latex(buf=buf, **render_kwargs)
+
+ Parameters
+ ----------
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
+ hide : dict, list of dict
+ Keyword args to pass to the method call of ``Styler.hide``. If a list will
+ call the method numerous times.
+ relabel_index : dict, list of dict
+ Keyword args to pass to the method of ``Styler.relabel_index``. If a list
+ will call the method numerous times.
+ format : dict, list of dict
+ Keyword args to pass to the method call of ``Styler.format``. If a list will
+ call the method numerous times.
+ format_index : dict, list of dict
+ Keyword args to pass to the method call of ``Styler.format_index``. If a
+ list will call the method numerous times.
+ render_kwargs : dict
+ Keyword args to pass to the method call of ``Styler.to_latex``.
+
+ Returns
+ -------
+ str or None
+ If buf is None, returns the result as a string. Otherwise returns None.
+ """
+ from pandas.io.formats.style import Styler
+
+ self = cast("DataFrame", self)
+ styler = Styler(self, uuid="")
+
+ for kw_name in ["hide", "relabel_index", "format", "format_index"]:
+ kw = vars()[kw_name]
+ if isinstance(kw, dict):
+ getattr(styler, kw_name)(**kw)
+ elif isinstance(kw, list):
+ for sub_kw in kw:
+ getattr(styler, kw_name)(**sub_kw)
+
+ # bold_rows is not a direct kwarg of Styler.to_latex
+ render_kwargs = {} if render_kwargs is None else render_kwargs
+ if render_kwargs.pop("bold_rows"):
+ styler.applymap_index(lambda v: "textbf:--rwrap;")
+
+ return styler.to_latex(buf=buf, **render_kwargs)
+
@overload
def to_csv(
self,
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 7f1775c53ce9e..dd361809e197c 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -237,7 +237,7 @@ def __init__(
precision: int | None = None,
table_styles: CSSStyles | None = None,
uuid: str | None = None,
- caption: str | tuple | None = None,
+ caption: str | tuple | list | None = None,
table_attributes: str | None = None,
cell_ids: bool = True,
na_rep: str | None = None,
@@ -2173,13 +2173,13 @@ def set_uuid(self, uuid: str) -> Styler:
self.uuid = uuid
return self
- def set_caption(self, caption: str | tuple) -> Styler:
+ def set_caption(self, caption: str | tuple | list) -> Styler:
"""
Set the text added to a ``<caption>`` HTML element.
Parameters
----------
- caption : str, tuple
+ caption : str, tuple, list
For HTML output either the string input is used or the first element of the
tuple. For LaTeX the string input provides a caption and the additional
tuple input allows for full captions and short captions, in that order.
@@ -2189,7 +2189,7 @@ def set_caption(self, caption: str | tuple) -> Styler:
Styler
"""
msg = "`caption` must be either a string or 2-tuple of strings."
- if isinstance(caption, tuple):
+ if isinstance(caption, (list, tuple)):
if (
len(caption) != 2
or not isinstance(caption[0], str)
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index c0e00d6bd30a4..5264342661b3f 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -85,7 +85,7 @@ def __init__(
uuid_len: int = 5,
table_styles: CSSStyles | None = None,
table_attributes: str | None = None,
- caption: str | tuple | None = None,
+ caption: str | tuple | list | None = None,
cell_ids: bool = True,
precision: int | None = None,
) -> None:
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 702c4a505a06a..687bad07926d0 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -280,22 +280,23 @@ def test_repr_column_name_unicode_truncation_bug(self):
with option_context("display.max_columns", 20):
assert "StringCol" in repr(df)
- @pytest.mark.filterwarnings(
- "ignore:.*DataFrame.to_latex` is expected to utilise:FutureWarning"
- )
def test_latex_repr(self):
- result = r"""\begin{tabular}{llll}
+ pytest.importorskip("jinja2")
+ expected = r"""\begin{tabular}{llll}
\toprule
-{} & 0 & 1 & 2 \\
+ & 0 & 1 & 2 \\
\midrule
-0 & $\alpha$ & b & c \\
-1 & 1 & 2 & 3 \\
+0 & $\alpha$ & b & c \\
+1 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
- with option_context("display.latex.escape", False, "display.latex.repr", True):
+ with option_context(
+ "display.latex.escape", False, "styler.render.repr", "latex"
+ ):
df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]])
- assert result == df._repr_latex_()
+ result = df._repr_latex_()
+ assert result == expected
# GH 12182
assert df._repr_latex_() is None
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 54e0feb28932b..70a2fc7dcc9dd 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -3430,7 +3430,6 @@ def test_repr_html_ipython_config(ip):
assert not result.error_in_exec
-@pytest.mark.filterwarnings("ignore:In future versions `DataFrame.to_latex`")
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
@pytest.mark.parametrize(
"encoding, data",
@@ -3445,6 +3444,8 @@ def test_filepath_or_buffer_arg(
filepath_or_buffer_id,
):
df = DataFrame([data])
+ if method in ["to_latex"]: # uses styler implementation
+ pytest.importorskip("jinja2")
if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
with pytest.raises(
@@ -3452,10 +3453,8 @@ def test_filepath_or_buffer_arg(
):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
elif encoding == "foo":
- expected_warning = FutureWarning if method == "to_latex" else None
- with tm.assert_produces_warning(expected_warning):
- with pytest.raises(LookupError, match="unknown encoding"):
- getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ with pytest.raises(LookupError, match="unknown encoding"):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
else:
expected = getattr(df, method)()
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
@@ -3465,6 +3464,8 @@ def test_filepath_or_buffer_arg(
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
+ if method in ["to_latex"]: # uses styler implementation
+ pytest.importorskip("jinja2")
msg = "buf is not a file name and it has no write method"
with pytest.raises(TypeError, match=msg):
getattr(float_frame, method)(buf=object())
diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py
index 4ded7bebc431e..3532f979665ec 100644
--- a/pandas/tests/io/formats/test_printing.py
+++ b/pandas/tests/io/formats/test_printing.py
@@ -1,7 +1,6 @@
import string
import numpy as np
-import pytest
import pandas._config.config as cf
@@ -120,9 +119,6 @@ def test_ambiguous_width(self):
class TestTableSchemaRepr:
- @pytest.mark.filterwarnings(
- "ignore:.*signature may therefore change.*:FutureWarning"
- )
def test_publishes(self, ip):
ipython = ip.instance(config=ip.config)
df = pd.DataFrame({"A": [1, 2]})
@@ -138,7 +134,7 @@ def test_publishes(self, ip):
formatted = ipython.display_formatter.format(obj)
assert set(formatted[0].keys()) == expected
- with_latex = pd.option_context("display.latex.repr", True)
+ with_latex = pd.option_context("styler.render.repr", "latex")
with opt, with_latex:
formatted = ipython.display_formatter.format(obj)
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index d6999b32e6a81..42adf3f7b2826 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -19,7 +19,7 @@
RowStringConverter,
)
-pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning")
+pytest.importorskip("jinja2")
def _dedent(string):
@@ -68,10 +68,10 @@ def test_to_latex_tabular_with_index(self):
r"""
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -85,10 +85,10 @@ def test_to_latex_tabular_without_index(self):
r"""
\begin{tabular}{rl}
\toprule
- a & b \\
+ a & b \\
\midrule
- 1 & b1 \\
- 2 & b2 \\
+ 1 & b1 \\
+ 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -101,7 +101,7 @@ def test_to_latex_tabular_without_index(self):
)
def test_to_latex_bad_column_format(self, bad_column_format):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- msg = r"column_format must be str or unicode"
+ msg = r"`column_format` must be str or unicode"
with pytest.raises(ValueError, match=msg):
df.to_latex(column_format=bad_column_format)
@@ -116,10 +116,10 @@ def test_to_latex_column_format(self):
r"""
\begin{tabular}{lcr}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -134,10 +134,10 @@ def test_to_latex_float_format_object_col(self):
r"""
\begin{tabular}{ll}
\toprule
- {} & 0 \\
+ & 0 \\
\midrule
0 & 1,000 \\
- 1 & test \\
+ 1 & test \\
\bottomrule
\end{tabular}
"""
@@ -151,9 +151,7 @@ def test_to_latex_empty_tabular(self):
r"""
\begin{tabular}{l}
\toprule
- Empty DataFrame
- Columns: RangeIndex(start=0, stop=0, step=1)
- Index: RangeIndex(start=0, stop=0, step=1) \\
+ \midrule
\bottomrule
\end{tabular}
"""
@@ -167,11 +165,11 @@ def test_to_latex_series(self):
r"""
\begin{tabular}{ll}
\toprule
- {} & 0 \\
+ & 0 \\
\midrule
- 0 & a \\
- 1 & b \\
- 2 & c \\
+ 0 & a \\
+ 1 & b \\
+ 2 & c \\
\bottomrule
\end{tabular}
"""
@@ -187,10 +185,10 @@ def test_to_latex_midrule_location(self):
r"""
\begin{tabular}{lr}
\toprule
- {} & a \\
+ & a \\
\midrule
- 0 & 1 \\
- 1 & 2 \\
+ 0 & 1 \\
+ 1 & 2 \\
\bottomrule
\end{tabular}
"""
@@ -206,9 +204,17 @@ def test_to_latex_empty_longtable(self):
r"""
\begin{longtable}{l}
\toprule
- Empty DataFrame
- Columns: RangeIndex(start=0, stop=0, step=1)
- Index: RangeIndex(start=0, stop=0, step=1) \\
+ \midrule
+ \endfirsthead
+ \toprule
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{0}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
\end{longtable}
"""
)
@@ -221,23 +227,21 @@ def test_to_latex_longtable_with_index(self):
r"""
\begin{longtable}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endfirsthead
-
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endhead
\midrule
- \multicolumn{3}{r}{{Continued on next page}} \\
+ \multicolumn{3}{r}{Continued on next page} \\
\midrule
\endfoot
-
\bottomrule
\endlastfoot
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\end{longtable}
"""
)
@@ -250,23 +254,21 @@ def test_to_latex_longtable_without_index(self):
r"""
\begin{longtable}{rl}
\toprule
- a & b \\
+ a & b \\
\midrule
\endfirsthead
-
\toprule
- a & b \\
+ a & b \\
\midrule
\endhead
\midrule
- \multicolumn{2}{r}{{Continued on next page}} \\
+ \multicolumn{2}{r}{Continued on next page} \\
\midrule
\endfoot
-
\bottomrule
\endlastfoot
- 1 & b1 \\
- 2 & b2 \\
+ 1 & b1 \\
+ 2 & b2 \\
\end{longtable}
"""
)
@@ -294,8 +296,9 @@ def test_to_latex_no_header_with_index(self):
r"""
\begin{tabular}{lrl}
\toprule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -310,6 +313,7 @@ def test_to_latex_no_header_without_index(self):
r"""
\begin{tabular}{rl}
\toprule
+ \midrule
1 & b1 \\
2 & b2 \\
\bottomrule
@@ -326,10 +330,10 @@ def test_to_latex_specified_header_with_index(self):
r"""
\begin{tabular}{lrl}
\toprule
- {} & AA & BB \\
+ & AA & BB \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -346,8 +350,8 @@ def test_to_latex_specified_header_without_index(self):
\toprule
AA & BB \\
\midrule
- 1 & b1 \\
- 2 & b2 \\
+ 1 & b1 \\
+ 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -382,10 +386,10 @@ def test_to_latex_decimal(self):
r"""
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1,0 & b1 \\
- 1 & 2,1 & b2 \\
+ 0 & 1,000000 & b1 \\
+ 1 & 2,100000 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -402,10 +406,10 @@ def test_to_latex_bold_rows(self):
r"""
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- \textbf{0} & 1 & b1 \\
- \textbf{1} & 2 & b2 \\
+ \textbf{0} & 1 & b1 \\
+ \textbf{1} & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -420,10 +424,10 @@ def test_to_latex_no_bold_rows(self):
r"""
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -463,14 +467,13 @@ def test_to_latex_caption_only(self, df_short, caption_table):
expected = _dedent(
r"""
\begin{table}
- \centering
\caption{a table in a \texttt{table/tabular} environment}
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -484,14 +487,13 @@ def test_to_latex_label_only(self, df_short, label_table):
expected = _dedent(
r"""
\begin{table}
- \centering
\label{tab:table_tabular}
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -505,15 +507,14 @@ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
expected = _dedent(
r"""
\begin{table}
- \centering
\caption{a table in a \texttt{table/tabular} environment}
\label{tab:table_tabular}
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -531,14 +532,13 @@ def test_to_latex_caption_and_shortcaption(
expected = _dedent(
r"""
\begin{table}
- \centering
\caption[a table]{a table in a \texttt{table/tabular} environment}
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -567,15 +567,14 @@ def test_to_latex_caption_shortcaption_and_label(
expected = _dedent(
r"""
\begin{table}
- \centering
\caption[a table]{a table in a \texttt{table/tabular} environment}
\label{tab:table_tabular}
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -596,7 +595,7 @@ def test_to_latex_caption_shortcaption_and_label(
def test_to_latex_bad_caption_raises(self, bad_caption):
# test that wrong number of params is raised
df = DataFrame({"a": [1]})
- msg = "caption must be either a string or a tuple of two strings"
+ msg = "`caption` must be either a string or 2-tuple of strings"
with pytest.raises(ValueError, match=msg):
df.to_latex(caption=bad_caption)
@@ -607,14 +606,13 @@ def test_to_latex_two_chars_caption(self, df_short):
expected = _dedent(
r"""
\begin{table}
- \centering
\caption{xy}
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -630,25 +628,24 @@ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
expected = _dedent(
r"""
\begin{longtable}{lrl}
- \caption{a table in a \texttt{longtable} environment}\\
+ \caption{a table in a \texttt{longtable} environment} \\
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endfirsthead
\caption[]{a table in a \texttt{longtable} environment} \\
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endhead
\midrule
- \multicolumn{3}{r}{{Continued on next page}} \\
+ \multicolumn{3}{r}{Continued on next page} \\
\midrule
\endfoot
-
\bottomrule
\endlastfoot
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\end{longtable}
"""
)
@@ -660,25 +657,23 @@ def test_to_latex_longtable_label_only(self, df_short, label_longtable):
expected = _dedent(
r"""
\begin{longtable}{lrl}
- \label{tab:longtable}\\
+ \label{tab:longtable} \\
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endfirsthead
-
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endhead
\midrule
- \multicolumn{3}{r}{{Continued on next page}} \\
+ \multicolumn{3}{r}{Continued on next page} \\
\midrule
\endfoot
-
\bottomrule
\endlastfoot
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\end{longtable}
"""
)
@@ -698,29 +693,27 @@ def test_to_latex_longtable_caption_and_label(
)
expected = _dedent(
r"""
- \begin{longtable}{lrl}
- \caption{a table in a \texttt{longtable} environment}
- \label{tab:longtable}\\
- \toprule
- {} & a & b \\
- \midrule
- \endfirsthead
- \caption[]{a table in a \texttt{longtable} environment} \\
- \toprule
- {} & a & b \\
- \midrule
- \endhead
- \midrule
- \multicolumn{3}{r}{{Continued on next page}} \\
- \midrule
- \endfoot
-
- \bottomrule
- \endlastfoot
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
- \end{longtable}
- """
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
)
assert result == expected
@@ -739,29 +732,27 @@ def test_to_latex_longtable_caption_shortcaption_and_label(
)
expected = _dedent(
r"""
- \begin{longtable}{lrl}
- \caption[a table]{a table in a \texttt{longtable} environment}
- \label{tab:longtable}\\
- \toprule
- {} & a & b \\
- \midrule
- \endfirsthead
- \caption[]{a table in a \texttt{longtable} environment} \\
- \toprule
- {} & a & b \\
- \midrule
- \endhead
- \midrule
- \multicolumn{3}{r}{{Continued on next page}} \\
- \midrule
- \endfoot
-
- \bottomrule
- \endlastfoot
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
- \end{longtable}
- """
+\begin{longtable}{lrl}
+\caption[a table]{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+\toprule
+ & a & b \\
+\midrule
+\endfirsthead
+\caption[]{a table in a \texttt{longtable} environment} \\
+\toprule
+ & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{Continued on next page} \\
+\midrule
+\endfoot
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
)
assert result == expected
@@ -780,10 +771,10 @@ def test_to_latex_escape_false(self, df_with_symbols):
r"""
\begin{tabular}{lll}
\toprule
- {} & co$e^x$ & co^l1 \\
+ & co$e^x$ & co^l1 \\
\midrule
- a & a & a \\
- b & b & b \\
+ a & a & a \\
+ b & b & b \\
\bottomrule
\end{tabular}
"""
@@ -796,10 +787,10 @@ def test_to_latex_escape_default(self, df_with_symbols):
r"""
\begin{tabular}{lll}
\toprule
- {} & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\
+ & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\
\midrule
- a & a & a \\
- b & b & b \\
+ a & a & a \\
+ b & b & b \\
\bottomrule
\end{tabular}
"""
@@ -813,11 +804,11 @@ def test_to_latex_special_escape(self):
r"""
\begin{tabular}{ll}
\toprule
- {} & 0 \\
+ & 0 \\
\midrule
- 0 & a\textbackslash b\textbackslash c \\
- 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
- 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
+ 0 & a\textbackslash b\textbackslash c \\
+ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
+ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
\bottomrule
\end{tabular}
"""
@@ -832,18 +823,18 @@ def test_to_latex_escape_special_chars(self):
r"""
\begin{tabular}{ll}
\toprule
- {} & 0 \\
- \midrule
- 0 & \& \\
- 1 & \% \\
- 2 & \$ \\
- 3 & \# \\
- 4 & \_ \\
- 5 & \{ \\
- 6 & \} \\
- 7 & \textasciitilde \\
- 8 & \textasciicircum \\
- 9 & \textbackslash \\
+ & 0 \\
+ \midrule
+ 0 & \& \\
+ 1 & \% \\
+ 2 & \$ \\
+ 3 & \# \\
+ 4 & \_ \\
+ 5 & \{ \\
+ 6 & \} \\
+ 7 & \textasciitilde \\
+ 8 & \textasciicircum \\
+ 9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
@@ -858,10 +849,10 @@ def test_to_latex_specified_header_special_chars_without_escape(self):
r"""
\begin{tabular}{lrl}
\toprule
- {} & $A$ & $B$ \\
+ & $A$ & $B$ \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
@@ -877,13 +868,12 @@ def test_to_latex_position(self):
expected = _dedent(
r"""
\begin{table}[h]
- \centering
\begin{tabular}{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
@@ -899,23 +889,21 @@ def test_to_latex_longtable_position(self):
r"""
\begin{longtable}[t]{lrl}
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endfirsthead
-
\toprule
- {} & a & b \\
+ & a & b \\
\midrule
\endhead
\midrule
- \multicolumn{3}{r}{{Continued on next page}} \\
+ \multicolumn{3}{r}{Continued on next page} \\
\midrule
\endfoot
-
\bottomrule
\endlastfoot
- 0 & 1 & b1 \\
- 1 & 2 & b2 \\
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
\end{longtable}
"""
)
@@ -950,11 +938,11 @@ def test_to_latex_with_formatters(self):
r"""
\begin{tabular}{llrrl}
\toprule
- {} & datetime64 & float & int & object \\
+ & datetime64 & float & int & object \\
\midrule
- index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
- index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
- index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
+ index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
+ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
+ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
\bottomrule
\end{tabular}
"""
@@ -969,7 +957,7 @@ def test_to_latex_float_format_no_fixed_width_3decimals(self):
r"""
\begin{tabular}{lr}
\toprule
- {} & x \\
+ & x \\
\midrule
0 & 0.200 \\
\bottomrule
@@ -986,7 +974,7 @@ def test_to_latex_float_format_no_fixed_width_integer(self):
r"""
\begin{tabular}{lr}
\toprule
- {} & x \\
+ & x \\
\midrule
0 & 100 \\
\bottomrule
@@ -1009,10 +997,10 @@ def test_to_latex_na_rep_and_float_format(self, na_rep):
rf"""
\begin{{tabular}}{{llr}}
\toprule
- {{}} & Group & Data \\
+ & Group & Data \\
\midrule
- 0 & A & 1.22 \\
- 1 & A & {na_rep} \\
+ 0 & A & 1.22 \\
+ 1 & A & {na_rep} \\
\bottomrule
\end{{tabular}}
"""
@@ -1056,10 +1044,10 @@ def test_to_latex_multindex_header(self):
r"""
\begin{tabular}{llrr}
\toprule
- & & r1 & r2 \\
- a & b & & \\
+ & & r1 & r2 \\
+ a & b & & \\
\midrule
- 0 & 1 & 2 & 3 \\
+ 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
@@ -1075,8 +1063,8 @@ def test_to_latex_multiindex_empty_name(self):
r"""
\begin{tabular}{lrrrr}
\toprule
- & 0 & 1 & 2 & 3 \\
- {} & & & & \\
+ & 0 & 1 & 2 & 3 \\
+ & & & & \\
\midrule
1 & -1 & -1 & -1 & -1 \\
2 & -1 & -1 & -1 & -1 \\
@@ -1093,10 +1081,10 @@ def test_to_latex_multiindex_column_tabular(self):
r"""
\begin{tabular}{ll}
\toprule
- {} & x \\
- {} & y \\
+ & x \\
+ & y \\
\midrule
- 0 & a \\
+ 0 & a \\
\bottomrule
\end{tabular}
"""
@@ -1110,9 +1098,9 @@ def test_to_latex_multiindex_small_tabular(self):
r"""
\begin{tabular}{lll}
\toprule
- & & 0 \\
+ & & 0 \\
\midrule
- x & y & a \\
+ x & y & a \\
\bottomrule
\end{tabular}
"""
@@ -1125,13 +1113,13 @@ def test_to_latex_multiindex_tabular(self, multiindex_frame):
r"""
\begin{tabular}{llrrrr}
\toprule
- & & 0 & 1 & 2 & 3 \\
+ & & 0 & 1 & 2 & 3 \\
\midrule
- c1 & 0 & 0 & 1 & 2 & 3 \\
- & 1 & 4 & 5 & 6 & 7 \\
- c2 & 0 & 0 & 1 & 2 & 3 \\
- & 1 & 4 & 5 & 6 & 7 \\
- c3 & 0 & 0 & 1 & 2 & 3 \\
+ c1 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c2 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
@@ -1148,12 +1136,12 @@ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
\begin{tabular}{lrrrrr}
\toprule
a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
- b & 0 & 1 & 0 & 1 & 0 \\
+ b & 0 & 1 & 0 & 1 & 0 \\
\midrule
- 0 & 0 & 4 & 0 & 4 & 0 \\
- 1 & 1 & 5 & 1 & 5 & 1 \\
- 2 & 2 & 6 & 2 & 6 & 2 \\
- 3 & 3 & 7 & 3 & 7 & 3 \\
+ 0 & 0 & 4 & 0 & 4 & 0 \\
+ 1 & 1 & 5 & 1 & 5 & 1 \\
+ 2 & 2 & 6 & 2 & 6 & 2 \\
+ 3 & 3 & 7 & 3 & 7 & 3 \\
\bottomrule
\end{tabular}
"""
@@ -1168,13 +1156,13 @@ def test_to_latex_index_has_name_tabular(self):
r"""
\begin{tabular}{llr}
\toprule
- & & c \\
- a & b & \\
+ & & c \\
+ a & b & \\
\midrule
- 0 & a & 1 \\
- & b & 2 \\
- 1 & a & 3 \\
- & b & 4 \\
+ 0 & a & 1 \\
+ & b & 2 \\
+ 1 & a & 3 \\
+ & b & 4 \\
\bottomrule
\end{tabular}
"""
@@ -1184,17 +1172,17 @@ def test_to_latex_index_has_name_tabular(self):
def test_to_latex_groupby_tabular(self):
# GH 10660
df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
- result = df.groupby("a").describe().to_latex()
+ result = df.groupby("a").describe().to_latex(float_format="{:.1f}".format)
expected = _dedent(
r"""
\begin{tabular}{lrrrrrrrr}
\toprule
- {} & \multicolumn{8}{l}{c} \\
- {} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
- a & & & & & & & & \\
+ & \multicolumn{8}{l}{c} \\
+ & count & mean & std & min & 25\% & 50\% & 75\% & max \\
+ a & & & & & & & & \\
\midrule
- 0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
- 1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
+ 0 & 2.0 & 1.5 & 0.7 & 1.0 & 1.2 & 1.5 & 1.8 & 2.0 \\
+ 1 & 2.0 & 3.5 & 0.7 & 3.0 & 3.2 & 3.5 & 3.8 & 4.0 \\
\bottomrule
\end{tabular}
"""
@@ -1217,10 +1205,10 @@ def test_to_latex_multiindex_dupe_level(self):
r"""
\begin{tabular}{lll}
\toprule
- & & col \\
+ & & col \\
\midrule
- A & c & NaN \\
- B & c & NaN \\
+ A & c & NaN \\
+ B & c & NaN \\
\bottomrule
\end{tabular}
"""
@@ -1233,14 +1221,14 @@ def test_to_latex_multicolumn_default(self, multicolumn_frame):
r"""
\begin{tabular}{lrrrrr}
\toprule
- {} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
- {} & 0 & 1 & 0 & 1 & 0 \\
- \midrule
- 0 & 0 & 5 & 0 & 5 & 0 \\
- 1 & 1 & 6 & 1 & 6 & 1 \\
- 2 & 2 & 7 & 2 & 7 & 2 \\
- 3 & 3 & 8 & 3 & 8 & 3 \\
- 4 & 4 & 9 & 4 & 9 & 4 \\
+ & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
@@ -1253,14 +1241,14 @@ def test_to_latex_multicolumn_false(self, multicolumn_frame):
r"""
\begin{tabular}{lrrrrr}
\toprule
- {} & c1 & & c2 & & c3 \\
- {} & 0 & 1 & 0 & 1 & 0 \\
- \midrule
- 0 & 0 & 5 & 0 & 5 & 0 \\
- 1 & 1 & 6 & 1 & 6 & 1 \\
- 2 & 2 & 7 & 2 & 7 & 2 \\
- 3 & 3 & 8 & 3 & 8 & 3 \\
- 4 & 4 & 9 & 4 & 9 & 4 \\
+ & c1 & & c2 & & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
@@ -1273,15 +1261,16 @@ def test_to_latex_multirow_true(self, multicolumn_frame):
r"""
\begin{tabular}{llrrrrr}
\toprule
- & & 0 & 1 & 2 & 3 & 4 \\
+ & & 0 & 1 & 2 & 3 & 4 \\
\midrule
- \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
- \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\cline{1-7}
- c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
@@ -1299,16 +1288,17 @@ def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame):
r"""
\begin{tabular}{llrrrrr}
\toprule
- & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
- & & 0 & 1 & 0 & 1 & 0 \\
+ & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
+ & & 0 & 1 & 0 & 1 & 0 \\
\midrule
- \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
- \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\cline{1-7}
- c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
@@ -1326,25 +1316,24 @@ def test_to_latex_multiindex_names(self, name0, name1, axes):
for idx in axes:
df.axes[idx].names = names
- idx_names = tuple(n or "{}" for n in names)
+ idx_names = tuple(n or "" for n in names)
idx_names_row = (
- f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
+ f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
if (0 in axes and any(names))
else ""
)
- placeholder = "{}" if any(names) and 1 in axes else " "
- col_names = [n if (bool(n) and 1 in axes) else placeholder for n in names]
+ col_names = [n if (bool(n) and 1 in axes) else "" for n in names]
observed = df.to_latex()
# pylint: disable-next=consider-using-f-string
expected = r"""\begin{tabular}{llrrrr}
\toprule
- & %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\
- & %s & 3 & 4 & 3 & 4 \\
+ & %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\
+ & %s & 3 & 4 & 3 & 4 \\
%s\midrule
1 & 3 & -1 & -1 & -1 & -1 \\
- & 4 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
2 & 3 & -1 & -1 & -1 & -1 \\
- & 4 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
\bottomrule
\end{tabular}
""" % tuple(
@@ -1363,14 +1352,14 @@ def test_to_latex_multiindex_nans(self, one_row):
r"""
\begin{tabular}{llr}
\toprule
- & & c \\
- a & b & \\
+ & & c \\
+ a & b & \\
\midrule
- NaN & 2 & 4 \\
+ NaN & 2 & 4 \\
"""
)
if not one_row:
- expected += r"""1.0 & 3 & 5 \\
+ expected += r"""1.000000 & 3 & 5 \\
"""
expected += r"""\bottomrule
\end{tabular}
@@ -1385,11 +1374,11 @@ def test_to_latex_non_string_index(self):
r"""
\begin{tabular}{llr}
\toprule
- & & 2 \\
- 0 & 1 & \\
+ & & 2 \\
+ 0 & 1 & \\
\midrule
- 1 & 2 & 3 \\
- & 2 & 3 \\
+ 1 & 2 & 3 \\
+ & 2 & 3 \\
\bottomrule
\end{tabular}
"""
@@ -1407,27 +1396,26 @@ def test_to_latex_multiindex_multirow(self):
r"""
\begin{tabular}{lll}
\toprule
- & & \\
i & val0 & val1 \\
\midrule
- \multirow{6}{*}{0.0} & \multirow{2}{*}{3.0} & 0 \\
- & & 1 \\
+ \multirow[t]{6}{*}{0.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
\cline{2-3}
- & \multirow{2}{*}{2.0} & 0 \\
- & & 1 \\
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
\cline{2-3}
- & \multirow{2}{*}{1.0} & 0 \\
- & & 1 \\
- \cline{1-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \multirow[t]{6}{*}{1.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
\cline{2-3}
- \multirow{6}{*}{1.0} & \multirow{2}{*}{3.0} & 0 \\
- & & 1 \\
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
\cline{2-3}
- & \multirow{2}{*}{2.0} & 0 \\
- & & 1 \\
- \cline{2-3}
- & \multirow{2}{*}{1.0} & 0 \\
- & & 1 \\
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
\bottomrule
\end{tabular}
"""
@@ -1517,15 +1505,3 @@ def test_get_strrow_multindex_multicolumn(self, row_num, expected):
)
assert row_string_converter.get_strrow(row_num=row_num) == expected
-
- def test_future_warning(self):
- df = DataFrame([[1]])
- msg = (
- "In future versions `DataFrame.to_latex` is expected to utilise the base "
- "implementation of `Styler.to_latex` for formatting and rendering. "
- "The arguments signature may therefore change. It is recommended instead "
- "to use `DataFrame.style.to_latex` which also contains additional "
- "functionality."
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- df.to_latex()
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 5ade1e0913804..b248c0c460c74 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -330,7 +330,6 @@ def test_read_fspath_all(self, reader, module, path, datapath):
else:
tm.assert_frame_equal(result, expected)
- @pytest.mark.filterwarnings("ignore:In future versions `DataFrame.to_latex`")
@pytest.mark.parametrize(
"writer_name, writer_kwargs, module",
[
@@ -345,6 +344,8 @@ def test_read_fspath_all(self, reader, module, path, datapath):
],
)
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
+ if writer_name in ["to_latex"]: # uses Styler implementation
+ pytest.importorskip("jinja2")
p1 = tm.ensure_clean("string")
p2 = tm.ensure_clean("fspath")
df = pd.DataFrame({"A": [1, 2]})
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 57dcd06f8f524..43a6c7028883b 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -206,19 +206,21 @@ def test_timeseries_repr_object_dtype(self):
ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
- @pytest.mark.filterwarnings("ignore::FutureWarning")
def test_latex_repr(self):
+ pytest.importorskip("jinja2") # uses Styler implementation
result = r"""\begin{tabular}{ll}
\toprule
-{} & 0 \\
+ & 0 \\
\midrule
-0 & $\alpha$ \\
-1 & b \\
-2 & c \\
+0 & $\alpha$ \\
+1 & b \\
+2 & c \\
\bottomrule
\end{tabular}
"""
- with option_context("display.latex.escape", False, "display.latex.repr", True):
+ with option_context(
+ "display.latex.escape", False, "styler.render.repr", "latex"
+ ):
s = Series([r"$\alpha$", "b", "c"])
assert result == s._repr_latex_()
| After a year of patching up things in #41649, and @jreback merge of #47864 I can finally propose this for review.
# Objective
- Maintain `DataFrame.to_latex` with its existing arguments, adding no arguments
- Process the rendering via `Styler.to_latex`
- Eliminate the need for `LatexFormatter` (code removal not part of this PR) and dual pandas code systems.
- Redocument and direct users to the `Styler` implementation for forward development
# Outcome
- All arguments in `DataFrame.to_latex` were replicable, with the exception of `col_space` which has no impact upon latex render and, <s>I personally don't like anyway. `col_space` is deprecated with warning and test.</s> `col_space` is removed.
- All original tests pass with minor changes to latex formatting and no significant changes to latex render.
- Some default formatting of floats changes based on pandas Styler options and DataFrame options crossover, which should be addressed later.
- The performance of `Styler` is marginally better, although for the table sizes that one would like to render in latex is neglible, anyway.
<s>No whats_new: awaiting feedback.</s>
# New docs
The key new section of the docs..

| https://api.github.com/repos/pandas-dev/pandas/pulls/47970 | 2022-08-04T20:38:08Z | 2023-01-19T20:01:01Z | 2023-01-19T20:01:01Z | 2023-01-19T21:49:50Z |
TYP: move imports inside TYPE_CHECKING | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 12352c4490f29..cd88eb1e49be8 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -77,7 +77,6 @@
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
-from pandas.core.arrays.base import ExtensionArray
import pandas.core.common as com
if TYPE_CHECKING:
@@ -91,6 +90,8 @@
DatetimeArray,
TimedeltaArray,
)
+ from pandas.core.arrays.base import ExtensionArray
+
BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset)
diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index c293839776e26..2ea5a5367611f 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -4,6 +4,7 @@
from __future__ import annotations
import abc
+from typing import TYPE_CHECKING
from pandas.errors import NumExprClobberingError
@@ -11,7 +12,6 @@
align_terms,
reconstruct_object,
)
-from pandas.core.computation.expr import Expr
from pandas.core.computation.ops import (
MATHOPS,
REDUCTIONS,
@@ -19,6 +19,9 @@
import pandas.io.formats.printing as printing
+if TYPE_CHECKING:
+ from pandas.core.computation.expr import Expr
+
_ne_builtins = frozenset(MATHOPS + REDUCTIONS)
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 1892069f78edd..ea70d0130a119 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -4,6 +4,7 @@
from __future__ import annotations
import tokenize
+from typing import TYPE_CHECKING
import warnings
from pandas._libs.lib import no_default
@@ -15,12 +16,14 @@
PARSERS,
Expr,
)
-from pandas.core.computation.ops import BinOp
from pandas.core.computation.parsing import tokenize_string
from pandas.core.computation.scope import ensure_scope
from pandas.io.formats.printing import pprint_thing
+if TYPE_CHECKING:
+ from pandas.core.computation.ops import BinOp
+
def _check_engine(engine: str | None) -> str:
"""
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 29af322ba0b42..5fbc26c057862 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -3,7 +3,10 @@
import ast
from functools import partial
-from typing import Any
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
import numpy as np
@@ -12,7 +15,6 @@
Timestamp,
)
from pandas._typing import npt
-from pandas.compat.chainmap import DeepChainMap
from pandas.errors import UndefinedVariableError
from pandas.core.dtypes.common import is_list_like
@@ -34,6 +36,9 @@
pprint_thing_encoded,
)
+if TYPE_CHECKING:
+ from pandas.compat.chainmap import DeepChainMap
+
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index 2a2671374efc4..a9ad2401564ec 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
import numpy as np
from pandas.core.algorithms import unique1d
@@ -8,7 +10,9 @@
CategoricalDtype,
recode_for_categories,
)
-from pandas.core.indexes.api import CategoricalIndex
+
+if TYPE_CHECKING:
+ from pandas.core.indexes.api import CategoricalIndex
def recode_for_groupby(
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 631f70f390319..b5b585fb78950 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -11,6 +11,7 @@
from functools import partial
from textwrap import dedent
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Hashable,
@@ -72,7 +73,6 @@
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
-from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
@@ -93,6 +93,9 @@
from pandas.plotting import boxplot_frame_groupby
+if TYPE_CHECKING:
+ from pandas.core.generic import NDFrame
+
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 283e4a48657c5..d6b63956422c7 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -10,6 +10,7 @@
import collections
import functools
from typing import (
+ TYPE_CHECKING,
Callable,
Generic,
Hashable,
@@ -77,7 +78,6 @@
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.frame import DataFrame
-from pandas.core.generic import NDFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
CategoricalIndex,
@@ -95,6 +95,9 @@
get_indexer_dict,
)
+if TYPE_CHECKING:
+ from pandas.core.generic import NDFrame
+
class WrappedCythonOp:
"""
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 28a0b43d5b02b..5bf30dde33ba4 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -4,6 +4,7 @@
from __future__ import annotations
from typing import (
+ TYPE_CHECKING,
Callable,
TypeVar,
)
@@ -21,10 +22,12 @@
from pandas.core.dtypes.generic import ABCDataFrame
-from pandas.core.arrays import IntervalArray
-from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.indexes.base import Index
+if TYPE_CHECKING:
+ from pandas.core.arrays import IntervalArray
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+
_T = TypeVar("_T", bound="NDArrayBackedExtensionIndex")
_ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex")
diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py
index ddcffbff64670..9139cb41e3af7 100644
--- a/pandas/core/interchange/dataframe.py
+++ b/pandas/core/interchange/dataframe.py
@@ -3,11 +3,11 @@
from collections import abc
from typing import TYPE_CHECKING
-import pandas as pd
from pandas.core.interchange.column import PandasColumn
from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg
if TYPE_CHECKING:
+ import pandas as pd
from pandas import Index
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index df327716970f1..a70f9b7b20d5a 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -98,7 +98,6 @@
PeriodArray,
TimedeltaArray,
)
-from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import PandasObject
import pandas.core.common as com
@@ -115,6 +114,7 @@
Float64Index,
Index,
)
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
# comparison is faster than is_object_dtype
_dtype_obj = np.dtype("object")
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 8797324166745..54b6b32ff1a68 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -68,7 +68,6 @@
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
-from pandas.core.indexes.api import Index
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
@@ -96,6 +95,7 @@
if TYPE_CHECKING:
from pandas import (
DataFrame,
+ Index,
Series,
)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index e06a288c1eb38..77a0d34132da0 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -73,7 +73,6 @@
MultiIndex,
Series,
)
-from pandas.core import groupby
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
@@ -85,6 +84,7 @@
if TYPE_CHECKING:
from pandas import DataFrame
+ from pandas.core import groupby
from pandas.core.arrays import DatetimeArray
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index d4f4057af7bfd..5039a29b74f1b 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -30,7 +30,6 @@
Index,
MultiIndex,
)
-from pandas.core.indexes.frozen import FrozenList
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
@@ -42,6 +41,7 @@
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
+ from pandas.core.indexes.frozen import FrozenList
class _Unstacker:
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index a6e125f4b9f33..185e93591cfe0 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -3,6 +3,7 @@
from collections import defaultdict
import datetime
from typing import (
+ TYPE_CHECKING,
Any,
DefaultDict,
Tuple,
@@ -21,7 +22,9 @@
combine_kwargs,
validate_freeze_panes,
)
-from pandas.io.formats.excel import ExcelCell
+
+if TYPE_CHECKING:
+ from pandas.io.formats.excel import ExcelCell
class ODSWriter(ExcelWriter):
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 543df8bec657b..e0f6e01a65052 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -21,14 +21,13 @@
WriteBuffer,
)
-from pandas.core.indexes.api import Index
-
from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
- from pandas.core.frame import (
+ from pandas import (
DataFrame,
+ Index,
Series,
)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 0fb6971018fd0..6bf4412be95d4 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -8,6 +8,7 @@
abstractmethod,
)
from typing import (
+ TYPE_CHECKING,
Iterator,
Sequence,
)
@@ -16,7 +17,8 @@
from pandas.core.dtypes.generic import ABCMultiIndex
-from pandas.io.formats.format import DataFrameFormatter
+if TYPE_CHECKING:
+ from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py
index 835e15789d615..071afc059b166 100644
--- a/pandas/io/formats/string.py
+++ b/pandas/io/formats/string.py
@@ -4,13 +4,18 @@
from __future__ import annotations
from shutil import get_terminal_size
-from typing import Iterable
+from typing import (
+ TYPE_CHECKING,
+ Iterable,
+)
import numpy as np
-from pandas.io.formats.format import DataFrameFormatter
from pandas.io.formats.printing import pprint_thing
+if TYPE_CHECKING:
+ from pandas.io.formats.format import DataFrameFormatter
+
class StringFormatter:
"""Formatter for string representation of a dataframe."""
diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py
index 4e8573b74ccdd..eb1835f0392b0 100644
--- a/pandas/io/formats/xml.py
+++ b/pandas/io/formats/xml.py
@@ -5,7 +5,10 @@
import codecs
import io
-from typing import Any
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
from pandas._typing import (
CompressionOptions,
@@ -20,7 +23,6 @@
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.missing import isna
-from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import get_handle
@@ -29,6 +31,9 @@
preprocess_data,
)
+if TYPE_CHECKING:
+ from pandas import DataFrame
+
@doc(
storage_options=_shared_docs["storage_options"],
diff --git a/pandas/io/html.py b/pandas/io/html.py
index ad92883fe8572..dbbe61dcb8247 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -10,6 +10,7 @@
import numbers
import re
from typing import (
+ TYPE_CHECKING,
Iterable,
Pattern,
Sequence,
@@ -30,7 +31,6 @@
from pandas.core.dtypes.common import is_list_like
from pandas.core.construction import create_series_with_explicit_dtype
-from pandas.core.frame import DataFrame
from pandas.io.common import (
file_exists,
@@ -43,6 +43,9 @@
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
+if TYPE_CHECKING:
+ from pandas import DataFrame
+
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index d40b0357049a1..02a0b27f82ef8 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -9,6 +9,7 @@
from io import StringIO
from itertools import islice
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Generic,
@@ -53,7 +54,6 @@
to_datetime,
)
from pandas.core.construction import create_series_with_explicit_dtype
-from pandas.core.generic import NDFrame
from pandas.core.reshape.concat import concat
from pandas.core.shared_docs import _shared_docs
@@ -73,6 +73,9 @@
)
from pandas.io.parsers.readers import validate_integer
+if TYPE_CHECKING:
+ from pandas.core.generic import NDFrame
+
FrameSeriesStrT = TypeVar("FrameSeriesStrT", bound=Literal["frame", "series"])
loads = json.loads
diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 21e8bb5f9e89f..2305c209936b6 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -1,14 +1,17 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
from pandas._typing import ReadBuffer
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.inference import is_integer
-from pandas.core.frame import DataFrame
-
from pandas.io.parsers.base_parser import ParserBase
+if TYPE_CHECKING:
+ from pandas import DataFrame
+
class ArrowParserWrapper(ParserBase):
"""
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 531fa5400f466..0e40e47bf7cb1 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -7,6 +7,7 @@
from enum import Enum
import itertools
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
DefaultDict,
@@ -59,7 +60,6 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
-from pandas import DataFrame
from pandas.core import algorithms
from pandas.core.arrays import Categorical
from pandas.core.indexes.api import (
@@ -72,6 +72,9 @@
from pandas.io.date_converters import generic_parser
+if TYPE_CHECKING:
+ from pandas import DataFrame
+
class ParserBase:
class BadLineHandleMethod(Enum):
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index 711d0857a5a1c..aec999e40b0f5 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -3,6 +3,7 @@
from collections import defaultdict
from io import TextIOWrapper
from typing import (
+ TYPE_CHECKING,
Hashable,
Mapping,
Sequence,
@@ -28,10 +29,6 @@
from pandas.core.dtypes.concat import union_categoricals
from pandas.core.dtypes.dtypes import ExtensionDtype
-from pandas import (
- Index,
- MultiIndex,
-)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.io.parsers.base_parser import (
@@ -39,6 +36,12 @@
is_index_col,
)
+if TYPE_CHECKING:
+ from pandas import (
+ Index,
+ MultiIndex,
+ )
+
class CParserWrapper(ParserBase):
low_memory: bool
diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index 3e897f9b1334e..7c03a81dbc0e6 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -10,6 +10,7 @@
import sys
from typing import (
IO,
+ TYPE_CHECKING,
DefaultDict,
Hashable,
Iterator,
@@ -38,16 +39,17 @@
from pandas.core.dtypes.common import is_integer
from pandas.core.dtypes.inference import is_dict_like
-from pandas import (
- Index,
- MultiIndex,
-)
-
from pandas.io.parsers.base_parser import (
ParserBase,
parser_defaults,
)
+if TYPE_CHECKING:
+ from pandas import (
+ Index,
+ MultiIndex,
+ )
+
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
diff --git a/pandas/io/spss.py b/pandas/io/spss.py
index 533cf7a7a6331..1b83d339a2990 100644
--- a/pandas/io/spss.py
+++ b/pandas/io/spss.py
@@ -1,16 +1,20 @@
from __future__ import annotations
from pathlib import Path
-from typing import Sequence
+from typing import (
+ TYPE_CHECKING,
+ Sequence,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.inference import is_list_like
-from pandas.core.api import DataFrame
-
from pandas.io.common import stringify_path
+if TYPE_CHECKING:
+ from pandas import DataFrame
+
def read_spss(
path: str | Path,
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index d52482fe2ef5a..fbe3e41be88a9 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -34,7 +34,6 @@
from pandas.core.dtypes.common import is_list_like
-from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import (
@@ -55,6 +54,8 @@
_XSLTResultTree,
)
+ from pandas import DataFrame
+
@doc(
storage_options=_shared_docs["storage_options"],
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 8510a7acac117..51a081645373e 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -9,6 +9,7 @@
)
import functools
from typing import (
+ TYPE_CHECKING,
Any,
Final,
Iterator,
@@ -32,7 +33,6 @@
to_offset,
)
from pandas._libs.tslibs.dtypes import FreqGroup
-from pandas._libs.tslibs.offsets import BaseOffset
from pandas._typing import F
from pandas.core.dtypes.common import (
@@ -57,6 +57,9 @@
)
import pandas.core.tools.datetimes as tools
+if TYPE_CHECKING:
+ from pandas._libs.tslibs.offsets import BaseOffset
+
# constants
HOURS_PER_DAY: Final = 24.0
MIN_PER_HOUR: Final = 60.0
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index 62242a4a2ddab..3ca00ae41d587 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -22,8 +22,6 @@
remove_na_arraylike,
)
-from pandas.core.frame import DataFrame
-
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.core import (
LinePlot,
@@ -44,6 +42,8 @@
if TYPE_CHECKING:
from matplotlib.axes import Axes
+ from pandas import DataFrame
+
class HistPlot(LinePlot):
@property
| These are found by flake8-type-checking. There are more cases (all of the _typing imports).
If flake8-type-checking should be run on the CI, it would be good to move all the _typing imports inside TYPE_CHECKING (not sure whether we want to do that). | https://api.github.com/repos/pandas-dev/pandas/pulls/47968 | 2022-08-04T16:45:56Z | 2022-08-04T18:53:02Z | 2022-08-04T18:53:02Z | 2022-09-10T01:38:48Z |
CI: fix mypy typing errors (pin numpy) on branch 1.4.x | diff --git a/environment.yml b/environment.yml
index 83b00c0dd6421..a20ccc797eb79 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,7 +3,7 @@ channels:
- conda-forge
dependencies:
# required
- - numpy>=1.18.5
+ - numpy>=1.18.5, <=1.22.4 # GH#47569
- python=3.8
- python-dateutil>=2.8.1
- pytz
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 05a9f0426440d..aba5a3b5a23d5 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,7 +1,7 @@
# This file is auto-generated from environment.yml, do not modify.
# See that file for comments about the need/usage of each dependency.
-numpy>=1.18.5
+numpy>=1.18.5, <=1.22.4
python-dateutil>=2.8.1
pytz
asv
| - [ ] closes #47569 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
cc @phofl @twoertwein
I have checked pyright with `pre-commit run -a --hook-stage manual pyright` whereas on ci we are installing pyright but hopefully the versions are the same. | https://api.github.com/repos/pandas-dev/pandas/pulls/47967 | 2022-08-04T16:33:11Z | 2022-08-04T18:10:14Z | 2022-08-04T18:10:14Z | 2022-08-04T18:46:31Z |
Backport PR #47951 on branch 1.4.x (DOC: Fix missing s3 file in read xml doc example) | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 705861a3aa568..862441e9b4cad 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3032,15 +3032,15 @@ Read in the content of the "books.xml" as instance of ``StringIO`` or
df = pd.read_xml(bio)
df
-Even read XML from AWS S3 buckets such as Python Software Foundation's IRS 990 Form:
+Even read XML from AWS S3 buckets such as NIH NCBI PMC Article Datasets providing
+Biomedical and Life Science Jorurnals:
.. ipython:: python
:okwarning:
df = pd.read_xml(
- "s3://irs-form-990/201923199349319487_public.xml",
- xpath=".//irs:Form990PartVIISectionAGrp",
- namespaces={"irs": "http://www.irs.gov/efile"}
+ "s3://pmc-oa-opendata/oa_comm/xml/all/PMC1236943.xml",
+ xpath=".//journal-meta",
)
df
| Backport PR #47951: DOC: Fix missing s3 file in read xml doc example | https://api.github.com/repos/pandas-dev/pandas/pulls/47957 | 2022-08-04T06:25:49Z | 2022-08-04T09:45:49Z | 2022-08-04T09:45:49Z | 2022-08-04T09:45:49Z |
Backport PR #47949 on branch 1.4.x (TST: Add test for loc not updating cache correctly) (#47949) | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 2b3223e0ff768..dce8fb60ecdd6 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
+- Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`)
- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
-
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 76da8e3c38b0f..a7af569e397eb 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1300,6 +1300,29 @@ def test_loc_expand_empty_frame_keep_midx_names(self):
)
tm.assert_frame_equal(df, expected)
+ def test_loc_internals_not_updated_correctly(self):
+ # GH#47867 all steps are necessary to reproduce the initial bug
+ df = DataFrame(
+ {"bool_col": True, "a": 1, "b": 2.5},
+ index=MultiIndex.from_arrays([[1, 2], [1, 2]], names=["idx1", "idx2"]),
+ )
+ idx = [(1, 1)]
+
+ df["c"] = 3
+ df.loc[idx, "c"] = 0
+
+ df.loc[idx, "c"]
+ df.loc[idx, ["a", "b"]]
+
+ df.loc[idx, "c"] = 15
+ result = df.loc[idx, "c"]
+ expected = df = Series(
+ 15,
+ index=MultiIndex.from_arrays([[1], [1]], names=["idx1", "idx2"]),
+ name="c",
+ )
+ tm.assert_series_equal(result, expected)
+
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
| Backport PR #47949 | https://api.github.com/repos/pandas-dev/pandas/pulls/47954 | 2022-08-03T23:17:35Z | 2022-08-04T09:11:53Z | 2022-08-04T09:11:53Z | 2022-08-04T13:52:57Z |
DOC: Fix missing s3 file in read xml doc example | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 25625dba1080f..0f6d67a4f6f63 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3054,15 +3054,15 @@ Read in the content of the "books.xml" as instance of ``StringIO`` or
df = pd.read_xml(bio)
df
-Even read XML from AWS S3 buckets such as Python Software Foundation's IRS 990 Form:
+Even read XML from AWS S3 buckets such as NIH NCBI PMC Article Datasets providing
+Biomedical and Life Science Jorurnals:
.. ipython:: python
:okwarning:
df = pd.read_xml(
- "s3://irs-form-990/201923199349319487_public.xml",
- xpath=".//irs:Form990PartVIISectionAGrp",
- namespaces={"irs": "http://www.irs.gov/efile"}
+ "s3://pmc-oa-opendata/oa_comm/xml/all/PMC1236943.xml",
+ xpath=".//journal-meta",
)
df
| - [x] closes #47945 (Replace xxxx with the Github issue number)
This returns now
```
journal-id journal-title issn publisher
0 Cardiovasc Ultrasound Cardiovascular Ultrasound 1476-7120 NaN
```
Fixes ci for now, happy to open another issue, if someone thinks we could use a better file
| https://api.github.com/repos/pandas-dev/pandas/pulls/47951 | 2022-08-03T21:13:15Z | 2022-08-04T06:25:09Z | 2022-08-04T06:25:09Z | 2022-08-04T13:52:43Z |
TST: Add test for loc not updating cache correctly | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 89487bfde94a5..a8d47067a9a86 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
+- Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`)
- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
-
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index edcd577dd948d..24d7365b52159 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1317,6 +1317,29 @@ def test_iloc_setitem_enlarge_no_warning(self):
df.iloc[:, 0] = np.array([1, 2], dtype=np.float64)
tm.assert_frame_equal(view, expected)
+ def test_loc_internals_not_updated_correctly(self):
+ # GH#47867 all steps are necessary to reproduce the initial bug
+ df = DataFrame(
+ {"bool_col": True, "a": 1, "b": 2.5},
+ index=MultiIndex.from_arrays([[1, 2], [1, 2]], names=["idx1", "idx2"]),
+ )
+ idx = [(1, 1)]
+
+ df["c"] = 3
+ df.loc[idx, "c"] = 0
+
+ df.loc[idx, "c"]
+ df.loc[idx, ["a", "b"]]
+
+ df.loc[idx, "c"] = 15
+ result = df.loc[idx, "c"]
+ expected = df = Series(
+ 15,
+ index=MultiIndex.from_arrays([[1], [1]], names=["idx1", "idx2"]),
+ name="c",
+ )
+ tm.assert_series_equal(result, expected)
+
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
| - [x] closes #47867 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
All the setitem and getitem steps are necessary to produce the bug. Reduced it as far as possible | https://api.github.com/repos/pandas-dev/pandas/pulls/47949 | 2022-08-03T19:49:15Z | 2022-08-03T22:55:49Z | 2022-08-03T22:55:49Z | 2022-08-04T09:08:04Z |
DOC: Plotting / Visualization without Jupyter | diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index d6426fe8bed2d..5ce2f7ca599a7 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -6,6 +6,11 @@
Chart visualization
*******************
+
+.. note::
+
+ The examples below assume that you're using `Jupyter <https://jupyter.org/>`_.
+
This section demonstrates visualization through charting. For information on
visualization of tabular data please see the section on `Table Visualization <style.ipynb>`_.
| Added a note at the top of the page clarifying that the examples assume you're working jupyter. @MarcoGorelli asked to be tagged to this PR. Hopefully I did everything correctly as this is my first PR.
- [x] closes #47861 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47948 | 2022-08-03T18:56:06Z | 2022-08-09T19:12:30Z | 2022-08-09T19:12:30Z | 2023-01-18T15:21:27Z |
REGR: fix error caused by deprecation warning in pd.merge when joining two Series | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 1e7ed256c05ef..49e82f3abe71d 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -19,6 +19,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`)
- Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`)
- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
+- Fixed regression in :func:`merge` throwing an error when passing a :class:`Series` with a multi-level name
- Fixed regression in :meth:`DataFrame.eval` creating a copy when updating inplace (:issue:`47449`)
-
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 2a92da99fab82..159ab33a8a04f 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -676,8 +676,8 @@ def __init__(
if _left.columns.nlevels != _right.columns.nlevels:
msg = (
"merging between different levels is deprecated and will be removed "
- f"in a future version. ({left.columns.nlevels} levels on the left, "
- f"{right.columns.nlevels} on the right)"
+ f"in a future version. ({_left.columns.nlevels} levels on the left, "
+ f"{_right.columns.nlevels} on the right)"
)
# stacklevel chosen to be correct when this is reached via pd.merge
# (and not DataFrame.join)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index b7365f98edf61..c7d7d1b0daa50 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -2195,6 +2195,26 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
)
+def test_merge_series_multilevel():
+ # GH#47946
+ a = DataFrame(
+ {"A": [1, 2, 3, 4]},
+ index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]),
+ )
+ b = Series(
+ [1, 2, 3, 4],
+ index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]),
+ name=("B", "C"),
+ )
+ expected = DataFrame(
+ {"A": [2, 4], ("B", "C"): [1, 3]},
+ index=MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]),
+ )
+ with tm.assert_produces_warning(FutureWarning):
+ result = merge(a, b, on=["outer", "inner"])
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize(
"col1, col2, kwargs, expected_cols",
[
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
There is a small bug/typo in the warning of message of `pd.merge` when merging two `Series` of different levels which results in an exception.
Thrown error message:
`AttributeError: 'Series' object has no attribute 'columns'` | https://api.github.com/repos/pandas-dev/pandas/pulls/47946 | 2022-08-03T18:02:05Z | 2022-08-19T12:57:10Z | 2022-08-19T12:57:09Z | 2022-08-22T11:46:07Z |
DOC: more exchange -> interchange renames | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 6e38024e02f36..3225195513fe5 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -14,23 +14,23 @@ including other versions of pandas.
Enhancements
~~~~~~~~~~~~
-.. _whatsnew_150.enhancements.dataframe_exchange:
+.. _whatsnew_150.enhancements.dataframe_interchange:
-DataFrame exchange protocol implementation
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+DataFrame interchange protocol implementation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas now implement the DataFrame exchange API spec.
+Pandas now implement the DataFrame interchange API spec.
See the full details on the API at https://data-apis.org/dataframe-protocol/latest/index.html
The protocol consists of two parts:
- - New method :meth:`DataFrame.__dataframe__` which produces the exchange object.
- It effectively "exports" the Pandas dataframe as an exchange object so
- any other library which has the protocol implemented can "import" that dataframe
- without knowing anything about the producer except that it makes an exchange object.
- - New function :func:`pandas.api.exchange.from_dataframe` which can take
- an arbitrary exchange object from any conformant library and construct a
- Pandas DataFrame out of it.
+- New method :meth:`DataFrame.__dataframe__` which produces the interchange object.
+ It effectively "exports" the pandas dataframe as an interchange object so
+ any other library which has the protocol implemented can "import" that dataframe
+ without knowing anything about the producer except that it makes an interchange object.
+- New function :func:`pandas.api.interchange.from_dataframe` which can take
+ an arbitrary interchange object from any conformant library and construct a
+ pandas DataFrame out of it.
.. _whatsnew_150.enhancements.styler:
diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py
index ae9b39de54d41..a430e0c66a988 100644
--- a/pandas/core/interchange/from_dataframe.py
+++ b/pandas/core/interchange/from_dataframe.py
@@ -131,7 +131,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
buffers.append(buf)
pandas_df = pd.DataFrame(columns)
- pandas_df.attrs["_EXCHANGE_PROTOCOL_BUFFERS"] = buffers
+ pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers
return pandas_df
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 5168e1acc8e7e..4c4c2a99c5558 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -88,8 +88,8 @@ def test_dataframe(data):
expected = from_dataframe(df2.select_columns_by_name(names))
tm.assert_frame_equal(result, expected)
- assert isinstance(result.attrs["_EXCHANGE_PROTOCOL_BUFFERS"], list)
- assert isinstance(expected.attrs["_EXCHANGE_PROTOCOL_BUFFERS"], list)
+ assert isinstance(result.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list)
+ assert isinstance(expected.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list)
def test_missing_from_masked():
| A few more renames as follow-up on https://github.com/pandas-dev/pandas/pull/47888 | https://api.github.com/repos/pandas-dev/pandas/pulls/47940 | 2022-08-03T06:56:47Z | 2022-08-03T14:13:11Z | 2022-08-03T14:13:11Z | 2022-08-10T17:42:48Z |
BUG: avoid relying on external packaging package in interchange protocol | diff --git a/pandas/core/interchange/buffer.py b/pandas/core/interchange/buffer.py
index 1d24efc263ca0..0f62dd00a0f41 100644
--- a/pandas/core/interchange/buffer.py
+++ b/pandas/core/interchange/buffer.py
@@ -1,14 +1,14 @@
from __future__ import annotations
import numpy as np
-from packaging import version
from pandas.core.interchange.dataframe_protocol import (
Buffer,
DlpackDeviceType,
)
+from pandas.util.version import Version
-_NUMPY_HAS_DLPACK = version.parse(np.__version__) >= version.parse("1.22.0")
+_NUMPY_HAS_DLPACK = Version(np.__version__) >= Version("1.22.0")
class PandasBuffer(Buffer):
| I noticed this by accident, and we have a vendored version of `packaging` that we otherwise use internally.
(this might be a good case to add a code check for? cc @MarcoGorelli) | https://api.github.com/repos/pandas-dev/pandas/pulls/47939 | 2022-08-03T06:51:03Z | 2022-08-03T14:20:03Z | 2022-08-03T14:20:03Z | 2022-08-26T07:18:01Z |
WEB: Accept PDEP-1 | diff --git a/web/pandas/pdeps/0001-purpose-and-guidelines.md b/web/pandas/pdeps/0001-purpose-and-guidelines.md
index 085f675974b2e..e09c5bd01204f 100644
--- a/web/pandas/pdeps/0001-purpose-and-guidelines.md
+++ b/web/pandas/pdeps/0001-purpose-and-guidelines.md
@@ -1,7 +1,7 @@
# PDEP-1: Purpose and guidelines
- Created: 3 August 2022
-- Status: Under discussion
+- Status: Accepted
- Discussion: [#47444](https://github.com/pandas-dev/pandas/pull/47444)
- Author: [Marc Garcia](https://github.com/datapythonista)
- Revision: 1
| I forgot to accept PDEP-1 before merging #47444 | https://api.github.com/repos/pandas-dev/pandas/pulls/47938 | 2022-08-03T06:50:35Z | 2022-08-03T14:12:28Z | 2022-08-03T14:12:28Z | 2022-08-06T13:40:27Z |
CLN: remove deprecated DEF | diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index 4a12659aa1f57..9ea0fa73cbc9f 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -21,9 +21,6 @@ import_array()
from pandas._libs.util cimport is_nan
-DEF cROUNDS = 2
-DEF dROUNDS = 4
-
@cython.boundscheck(False)
def hash_object_array(
@@ -162,6 +159,8 @@ cdef uint64_t low_level_siphash(uint8_t* data, size_t datalen,
cdef uint8_t* end = data + datalen - (datalen % sizeof(uint64_t))
cdef int left = datalen & 7
cdef int left_byte
+ cdef int cROUNDS = 2
+ cdef int dROUNDS = 4
b = (<uint64_t>datalen) << 56
v3 ^= k1
| xref https://github.com/cython/cython/issues/4310
discovered in #47442 | https://api.github.com/repos/pandas-dev/pandas/pulls/47936 | 2022-08-03T06:07:03Z | 2022-08-08T22:36:23Z | 2022-08-08T22:36:23Z | 2022-11-18T02:18:38Z |
Fix out-of-bounds heap access in internals.pyx. | diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index be71fe53d35db..159fdbc080fb4 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -469,12 +469,14 @@ def get_blkno_indexers(
n = blknos.shape[0]
result = list()
+
+ if n == 0:
+ return result
+
start = 0
cur_blkno = blknos[start]
- if n == 0:
- pass
- elif group is False:
+ if group is False:
for i in range(1, n):
if blknos[i] != cur_blkno:
result.append((cur_blkno, slice(start, i)))
| If blknos is empty, we unconditionally access blknos[start] where start
is 0. This is an out-of-bounds heap access that can be caught by
AddressSanitizer, but it's easy to avoid since
we are going to ignore the result anyway.
No new tests, because this does not change any observable behaviors. | https://api.github.com/repos/pandas-dev/pandas/pulls/47935 | 2022-08-03T01:05:02Z | 2022-08-08T22:39:27Z | 2022-08-08T22:39:27Z | 2022-08-08T22:40:00Z |
ENH: copy kwd for add_suffix/add_prefix | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 6e38024e02f36..cd93329a75c8a 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -279,6 +279,7 @@ Other enhancements
- :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`)
- :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`)
- :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`)
+- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7352ad2a4985d..499912596bdd9 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4549,7 +4549,7 @@ def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True)
@final
- def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT:
+ def add_prefix(self: NDFrameT, prefix: str, copy: bool_t = True) -> NDFrameT:
"""
Prefix labels with string `prefix`.
@@ -4560,6 +4560,10 @@ def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT:
----------
prefix : str
The string to add before each label.
+ copy : bool, default True
+ Whether to copy the underlying data.
+
+ .. versionadded:: 1.5.0
Returns
-------
@@ -4610,10 +4614,10 @@ def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT:
# expected "NDFrameT")
# error: Argument 1 to "rename" of "NDFrame" has incompatible type
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
- return self._rename(**mapper) # type: ignore[return-value, arg-type]
+ return self._rename(**mapper, copy=copy) # type: ignore[return-value, arg-type]
@final
- def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT:
+ def add_suffix(self: NDFrameT, suffix: str, copy: bool_t = True) -> NDFrameT:
"""
Suffix labels with string `suffix`.
@@ -4624,6 +4628,10 @@ def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT:
----------
suffix : str
The string to add after each label.
+ copy : bool, default True
+ Whether to copy the underlying data.
+
+ .. versionadded:: 1.5.0
Returns
-------
@@ -4674,7 +4682,7 @@ def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT:
# expected "NDFrameT")
# error: Argument 1 to "rename" of "NDFrame" has incompatible type
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
- return self._rename(**mapper) # type: ignore[return-value, arg-type]
+ return self._rename(**mapper, copy=copy) # type: ignore[return-value, arg-type]
@overload
def sort_values(
diff --git a/pandas/tests/frame/methods/test_add_prefix_suffix.py b/pandas/tests/frame/methods/test_add_prefix_suffix.py
index ea75e9ff51552..cc36a3caf6ec7 100644
--- a/pandas/tests/frame/methods/test_add_prefix_suffix.py
+++ b/pandas/tests/frame/methods/test_add_prefix_suffix.py
@@ -18,3 +18,56 @@ def test_add_prefix_suffix(float_frame):
with_pct_suffix = float_frame.add_suffix("%")
expected = Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
+
+
+def test_add_prefix_suffix_copy(float_frame):
+ # GH#47934
+ ser = float_frame.iloc[0]
+
+ with_prefix = float_frame.add_prefix("foo#", copy=True)
+ expected = Index([f"foo#{c}" for c in float_frame.columns])
+ tm.assert_index_equal(with_prefix.columns, expected)
+ assert not any(
+ tm.shares_memory(float_frame.iloc[:, i], with_prefix.iloc[:, i])
+ for i in range(float_frame.shape[1])
+ )
+
+ ser_with_prefix = ser.add_prefix("foo#", copy=True)
+ tm.assert_index_equal(ser_with_prefix.index, expected)
+ assert not tm.shares_memory(ser_with_prefix, ser)
+
+ with_prefix = float_frame.add_prefix("foo#", copy=False)
+ expected = Index([f"foo#{c}" for c in float_frame.columns])
+ tm.assert_index_equal(with_prefix.columns, expected)
+ assert all(
+ tm.shares_memory(float_frame.iloc[:, i], with_prefix.iloc[:, i])
+ for i in range(float_frame.shape[1])
+ )
+
+ ser_with_prefix = ser.add_prefix("foo#", copy=False)
+ tm.assert_index_equal(ser_with_prefix.index, expected)
+ assert tm.shares_memory(ser_with_prefix, ser)
+
+ with_suffix = float_frame.add_suffix("#foo", copy=True)
+ expected = Index([f"{c}#foo" for c in float_frame.columns])
+ tm.assert_index_equal(with_suffix.columns, expected)
+ assert not any(
+ tm.shares_memory(float_frame.iloc[:, i], with_suffix.iloc[:, i])
+ for i in range(float_frame.shape[1])
+ )
+
+ ser_with_suffix = ser.add_suffix("#foo", copy=True)
+ tm.assert_index_equal(ser_with_suffix.index, expected)
+ assert not tm.shares_memory(ser_with_suffix, ser)
+
+ with_suffix = float_frame.add_suffix("#foo", copy=False)
+ expected = Index([f"{c}#foo" for c in float_frame.columns])
+ tm.assert_index_equal(with_suffix.columns, expected)
+ assert all(
+ tm.shares_memory(float_frame.iloc[:, i], with_suffix.iloc[:, i])
+ for i in range(float_frame.shape[1])
+ )
+
+ ser_with_suffix = ser.add_suffix("#foo", copy=False)
+ tm.assert_index_equal(ser_with_suffix.index, expected)
+ assert tm.shares_memory(ser_with_suffix, ser)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47934 | 2022-08-02T23:12:16Z | 2022-08-10T00:13:33Z | 2022-08-10T00:13:33Z | 2022-08-10T01:23:54Z |
ENH: copy keyword to set_axis | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 02db4cbe0e8a5..1913e3857f2d3 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -294,6 +294,7 @@ Other enhancements
- :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`)
- :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`)
- :class:`Interval` now supports checking whether one interval is contained by another interval (:issue:`46613`)
+- Added ``copy`` keyword to :meth:`Series.set_axis` and :meth:`DataFrame.set_axis` to allow user to set axis on a new object without necessarily copying the underlying data (:issue:`47932`)
- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`)
- :meth:`DataFrame.set_index` now supports a ``copy`` keyword. If ``False``, the underlying data is not copied when a new :class:`DataFrame` is returned (:issue:`48043`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6cfca4ebdc612..9adcfddc4006c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5036,17 +5036,34 @@ def align(
@overload
def set_axis(
- self, labels, *, axis: Axis = ..., inplace: Literal[False] = ...
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: Literal[False] = ...,
+ copy: bool | lib.NoDefault = ...,
) -> DataFrame:
...
@overload
- def set_axis(self, labels, *, axis: Axis = ..., inplace: Literal[True]) -> None:
+ def set_axis(
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: Literal[True],
+ copy: bool | lib.NoDefault = ...,
+ ) -> None:
...
@overload
def set_axis(
- self, labels, *, axis: Axis = ..., inplace: bool = ...
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: bool = ...,
+ copy: bool | lib.NoDefault = ...,
) -> DataFrame | None:
...
@@ -5091,10 +5108,15 @@ def set_axis(
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
- def set_axis( # type: ignore[override]
- self, labels, axis: Axis = 0, inplace: bool = False
- ) -> DataFrame | None:
- return super().set_axis(labels, axis=axis, inplace=inplace)
+ def set_axis(
+ self,
+ labels,
+ axis: Axis = 0,
+ inplace: bool = False,
+ *,
+ copy: bool | lib.NoDefault = lib.no_default,
+ ):
+ return super().set_axis(labels, axis=axis, inplace=inplace, copy=copy)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d9264e8a18f2e..7de75be78e6dd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -711,23 +711,45 @@ def size(self) -> int:
@overload
def set_axis(
- self: NDFrameT, labels, *, axis: Axis = ..., inplace: Literal[False] = ...
+ self: NDFrameT,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: Literal[False] = ...,
+ copy: bool_t | lib.NoDefault = ...,
) -> NDFrameT:
...
@overload
- def set_axis(self, labels, *, axis: Axis = ..., inplace: Literal[True]) -> None:
+ def set_axis(
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: Literal[True],
+ copy: bool_t | lib.NoDefault = ...,
+ ) -> None:
...
@overload
def set_axis(
- self: NDFrameT, labels, *, axis: Axis = ..., inplace: bool_t = ...
+ self: NDFrameT,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: bool_t = ...,
+ copy: bool_t | lib.NoDefault = ...,
) -> NDFrameT | None:
...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
def set_axis(
- self: NDFrameT, labels, axis: Axis = 0, inplace: bool_t = False
+ self: NDFrameT,
+ labels,
+ axis: Axis = 0,
+ inplace: bool_t = False,
+ *,
+ copy: bool_t | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
"""
Assign desired index to given axis.
@@ -747,6 +769,11 @@ def set_axis(
inplace : bool, default False
Whether to return a new %(klass)s instance.
+ copy : bool, default True
+ Whether to make a copy of the underlying data.
+
+ .. versionadded:: 1.5.0
+
Returns
-------
renamed : %(klass)s or None
@@ -756,16 +783,25 @@ def set_axis(
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
+ if inplace:
+ if copy is True:
+ raise ValueError("Cannot specify both inplace=True and copy=True")
+ copy = False
+ elif copy is lib.no_default:
+ copy = True
+
self._check_inplace_and_allows_duplicate_labels(inplace)
- return self._set_axis_nocheck(labels, axis, inplace)
+ return self._set_axis_nocheck(labels, axis, inplace, copy=copy)
@final
- def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t):
+ def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t, copy: bool_t):
# NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy.
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
- obj = self.copy()
+ # With copy=False, we create a new object but don't copy the
+ # underlying data.
+ obj = self.copy(deep=copy)
obj.set_axis(labels, axis=axis, inplace=True)
return obj
@@ -1053,7 +1089,7 @@ def _rename(
raise KeyError(f"{missing_labels} not found in axis")
new_index = ax._transform_index(f, level=level)
- result._set_axis_nocheck(new_index, axis=axis_no, inplace=True)
+ result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False)
result._clear_item_cache()
if inplace:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b1ad3ab175d1b..f55d6a26255a0 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4976,17 +4976,34 @@ def rename(
@overload
def set_axis(
- self, labels, *, axis: Axis = ..., inplace: Literal[False] = ...
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: Literal[False] = ...,
+ copy: bool | lib.NoDefault = ...,
) -> Series:
...
@overload
- def set_axis(self, labels, *, axis: Axis = ..., inplace: Literal[True]) -> None:
+ def set_axis(
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: Literal[True],
+ copy: bool | lib.NoDefault = ...,
+ ) -> None:
...
@overload
def set_axis(
- self, labels, *, axis: Axis = ..., inplace: bool = ...
+ self,
+ labels,
+ *,
+ axis: Axis = ...,
+ inplace: bool = ...,
+ copy: bool | lib.NoDefault = ...,
) -> Series | None:
...
@@ -5018,9 +5035,13 @@ def set_axis(
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis( # type: ignore[override]
- self, labels, axis: Axis = 0, inplace: bool = False
+ self,
+ labels,
+ axis: Axis = 0,
+ inplace: bool = False,
+ copy: bool | lib.NoDefault = lib.no_default,
) -> Series | None:
- return super().set_axis(labels, axis=axis, inplace=inplace)
+ return super().set_axis(labels, axis=axis, inplace=inplace, copy=copy)
# error: Cannot determine type of 'reindex'
@doc(
diff --git a/pandas/tests/frame/methods/test_set_axis.py b/pandas/tests/frame/methods/test_set_axis.py
index 3284243ddac48..67488dff3c335 100644
--- a/pandas/tests/frame/methods/test_set_axis.py
+++ b/pandas/tests/frame/methods/test_set_axis.py
@@ -24,6 +24,69 @@ def test_set_axis(self, obj):
result = obj.set_axis(new_index, axis=0, inplace=False)
tm.assert_equal(expected, result)
+ def test_set_axis_copy(self, obj):
+ # Test copy keyword GH#47932
+ new_index = list("abcd")[: len(obj)]
+
+ orig = obj.iloc[:]
+ expected = obj.copy()
+ expected.index = new_index
+
+ with pytest.raises(
+ ValueError, match="Cannot specify both inplace=True and copy=True"
+ ):
+ obj.set_axis(new_index, axis=0, inplace=True, copy=True)
+
+ result = obj.set_axis(new_index, axis=0, copy=True)
+ tm.assert_equal(expected, result)
+ assert result is not obj
+ # check we DID make a copy
+ if obj.ndim == 1:
+ assert not tm.shares_memory(result, obj)
+ else:
+ assert not any(
+ tm.shares_memory(result.iloc[:, i], obj.iloc[:, i])
+ for i in range(obj.shape[1])
+ )
+
+ result = obj.set_axis(new_index, axis=0, copy=False)
+ tm.assert_equal(expected, result)
+ assert result is not obj
+ # check we did NOT make a copy
+ if obj.ndim == 1:
+ assert tm.shares_memory(result, obj)
+ else:
+ assert all(
+ tm.shares_memory(result.iloc[:, i], obj.iloc[:, i])
+ for i in range(obj.shape[1])
+ )
+
+ # copy defaults to True
+ result = obj.set_axis(new_index, axis=0)
+ tm.assert_equal(expected, result)
+ assert result is not obj
+ # check we DID make a copy
+ if obj.ndim == 1:
+ assert not tm.shares_memory(result, obj)
+ else:
+ assert not any(
+ tm.shares_memory(result.iloc[:, i], obj.iloc[:, i])
+ for i in range(obj.shape[1])
+ )
+
+ # Do this last since it alters obj inplace
+ res = obj.set_axis(new_index, inplace=True, copy=False)
+ assert res is None
+ tm.assert_equal(expected, obj)
+ # check we did NOT make a copy
+ if obj.ndim == 1:
+ assert tm.shares_memory(obj, orig)
+ else:
+ assert all(
+ tm.shares_memory(obj.iloc[:, i], orig.iloc[:, i])
+ for i in range(obj.shape[1])
+ )
+
@pytest.mark.parametrize("axis", [0, "index", 1, "columns"])
def test_set_axis_inplace_axis(self, axis, obj):
# GH#14636
| This also opens the option of deprecating `inplace` here.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47932 | 2022-08-02T17:37:08Z | 2022-08-17T00:56:12Z | 2022-08-17T00:56:12Z | 2022-08-17T00:58:22Z |
ENH: Support For Interval __contains__ Other Interval (#46613) | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b71d294b97f9a..e54819e31c90f 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -292,6 +292,7 @@ Other enhancements
- :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`)
- :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`)
- :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`)
+- :class:`Interval` now supports checking whether one interval is contained by another interval (:issue:`46613`)
- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`)
- :meth:`DataFrame.set_index` now supports a ``copy`` keyword. If ``False``, the underlying data is not copied when a new :class:`DataFrame` is returned (:issue:`48043`)
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index b27d2b5f8fd4d..9b73e9d0bf54a 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -79,10 +79,17 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
def __hash__(self) -> int: ...
@overload
def __contains__(
- self: Interval[_OrderableTimesT], key: _OrderableTimesT
+ self: Interval[Timedelta], key: Timedelta | Interval[Timedelta]
) -> bool: ...
@overload
- def __contains__(self: Interval[_OrderableScalarT], key: float) -> bool: ...
+ def __contains__(
+ self: Interval[Timestamp], key: Timestamp | Interval[Timestamp]
+ ) -> bool: ...
+ @overload
+ def __contains__(
+ self: Interval[_OrderableScalarT],
+ key: _OrderableScalarT | Interval[_OrderableScalarT],
+ ) -> bool: ...
@overload
def __add__(
self: Interval[_OrderableTimesT], y: Timedelta
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index bcd85f915e4a2..7cacc8cc639f7 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -299,10 +299,12 @@ cdef class Interval(IntervalMixin):
>>> iv
Interval(0, 5, inclusive='right')
- You can check if an element belongs to it
+ You can check if an element belongs to it, or if it contains another interval:
>>> 2.5 in iv
True
+ >>> pd.Interval(left=2, right=5, inclusive='both') in iv
+ True
You can test the bounds (``inclusive='right'``, so ``0 < x <= 5``):
@@ -412,7 +414,17 @@ cdef class Interval(IntervalMixin):
def __contains__(self, key) -> bool:
if _interval_like(key):
- raise TypeError("__contains__ not defined for two intervals")
+ key_closed_left = key.inclusive in ('left', 'both')
+ key_closed_right = key.inclusive in ('right', 'both')
+ if self.open_left and key_closed_left:
+ left_contained = self.left < key.left
+ else:
+ left_contained = self.left <= key.left
+ if self.open_right and key_closed_right:
+ right_contained = key.right < self.right
+ else:
+ right_contained = key.right <= self.right
+ return left_contained and right_contained
return ((self.left < key if self.open_left else self.left <= key) and
(key < self.right if self.open_right else key <= self.right))
diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py
index 878b5e6ec0167..c5644b2f36ead 100644
--- a/pandas/tests/scalar/interval/test_interval.py
+++ b/pandas/tests/scalar/interval/test_interval.py
@@ -36,10 +36,6 @@ def test_contains(self, interval):
assert 1 in interval
assert 0 not in interval
- msg = "__contains__ not defined for two intervals"
- with pytest.raises(TypeError, match=msg):
- interval in interval
-
interval_both = Interval(0, 1, "both")
assert 0 in interval_both
assert 1 in interval_both
diff --git a/pandas/tests/scalar/interval/test_ops.py b/pandas/tests/scalar/interval/test_ops.py
index 9fe40c208d880..92db6ac772830 100644
--- a/pandas/tests/scalar/interval/test_ops.py
+++ b/pandas/tests/scalar/interval/test_ops.py
@@ -66,3 +66,54 @@ def test_overlaps_invalid_type(self, other):
msg = f"`other` must be an Interval, got {type(other).__name__}"
with pytest.raises(TypeError, match=msg):
interval.overlaps(other)
+
+
+class TestContains:
+ def test_contains_interval(self, inclusive_endpoints_fixture):
+ interval1 = Interval(0, 1, "both")
+ interval2 = Interval(0, 1, inclusive_endpoints_fixture)
+ assert interval1 in interval1
+ assert interval2 in interval2
+ assert interval2 in interval1
+ assert interval1 not in interval2 or inclusive_endpoints_fixture == "both"
+
+ def test_contains_infinite_length(self):
+ interval1 = Interval(0, 1, "both")
+ interval2 = Interval(float("-inf"), float("inf"), "neither")
+ assert interval1 in interval2
+ assert interval2 not in interval1
+
+ def test_contains_zero_length(self):
+ interval1 = Interval(0, 1, "both")
+ interval2 = Interval(-1, -1, "both")
+ interval3 = Interval(0.5, 0.5, "both")
+ assert interval2 not in interval1
+ assert interval3 in interval1
+ assert interval2 not in interval3 and interval3 not in interval2
+ assert interval1 not in interval2 and interval1 not in interval3
+
+ @pytest.mark.parametrize(
+ "type1",
+ [
+ (0, 1),
+ (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)),
+ (Timedelta("0h"), Timedelta("1h")),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "type2",
+ [
+ (0, 1),
+ (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)),
+ (Timedelta("0h"), Timedelta("1h")),
+ ],
+ )
+ def test_contains_mixed_types(self, type1, type2):
+ interval1 = Interval(*type1)
+ interval2 = Interval(*type2)
+ if type1 == type2:
+ assert interval1 in interval2
+ else:
+ msg = "^'<=' not supported between instances of"
+ with pytest.raises(TypeError, match=msg):
+ interval1 in interval2
| - [x] closes #46613
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47927 | 2022-08-02T05:10:12Z | 2022-08-15T16:19:39Z | 2022-08-15T16:19:39Z | 2022-08-15T16:19:52Z |
TYP: pandas.core.series annotations from pandas-stubs | diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index 369e4b3454b65..5e0af3c0bc07d 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -866,7 +866,7 @@ def assert_series_equal(
left,
right,
check_dtype: bool | Literal["equiv"] = True,
- check_index_type="equiv",
+ check_index_type: bool | Literal["equiv"] = "equiv",
check_series_type=True,
check_less_precise: bool | int | NoDefault = no_default,
check_names=True,
@@ -1134,7 +1134,7 @@ def assert_frame_equal(
left,
right,
check_dtype: bool | Literal["equiv"] = True,
- check_index_type="equiv",
+ check_index_type: bool | Literal["equiv"] = "equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 48fd8f2d1256d..88d826ec454b2 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -39,6 +39,7 @@
Timedelta,
Timestamp,
)
+ from pandas._libs.tslibs import BaseOffset
from pandas.core.dtypes.dtypes import ExtensionDtype
@@ -63,7 +64,6 @@
from pandas.core.window.rolling import BaseWindow
from pandas.io.formats.format import EngFormatter
- from pandas.tseries.offsets import DateOffset
# numpy compatible types
NumpyValueArrayLike = Union[npt._ScalarLike_co, npt.ArrayLike]
@@ -113,7 +113,7 @@
Suffixes = Tuple[Optional[str], Optional[str]]
Ordered = Optional[bool]
JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
-Frequency = Union[str, "DateOffset"]
+Frequency = Union[str, "BaseOffset"]
Axes = Union[AnyArrayLike, List, range]
RandomState = Union[
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index f268c24ca766d..043917376b8c1 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -985,7 +985,7 @@ def equals(self, other: object) -> bool:
equal_na = self.isna() & other.isna() # type: ignore[operator]
return bool((equal_values | equal_na).all())
- def isin(self, values) -> np.ndarray:
+ def isin(self, values) -> npt.NDArray[np.bool_]:
"""
Pointwise comparison for set containment in the given values.
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index bd765b4601b01..552c1a82e75e0 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1738,7 +1738,7 @@ def contains(self, other):
other < self._right if self.open_right else other <= self._right
)
- def isin(self, values) -> np.ndarray:
+ def isin(self, values) -> npt.NDArray[np.bool_]:
if not hasattr(values, "dtype"):
values = np.array(values)
values = extract_array(values, extract_numpy=True)
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index caddd12a2c2b4..9e2cbd86e83a8 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -202,7 +202,7 @@ def _maybe_convert_setitem_value(self, value):
raise ValueError("Scalar must be NA or str")
return value
- def isin(self, values):
+ def isin(self, values) -> npt.NDArray[np.bool_]:
if pa_version_under2p0:
fallback_performancewarning(version="2")
return super().isin(values)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8c4924a2483be..6cfca4ebdc612 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -26,6 +26,7 @@
Iterable,
Iterator,
Literal,
+ Mapping,
Sequence,
cast,
overload,
@@ -68,6 +69,7 @@
Level,
NaPosition,
PythonFuncType,
+ QuantileInterpolation,
ReadBuffer,
Renamer,
Scalar,
@@ -1618,10 +1620,10 @@ def __rmatmul__(self, other) -> DataFrame:
@classmethod
def from_dict(
cls,
- data,
+ data: dict,
orient: str = "columns",
dtype: Dtype | None = None,
- columns=None,
+ columns: Axes | None = None,
) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
@@ -1713,7 +1715,10 @@ def from_dict(
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
- data, index = list(data.values()), list(data.keys())
+ index = list(data.keys())
+ # error: Incompatible types in assignment (expression has type
+ # "List[Any]", variable has type "Dict[Any, Any]")
+ data = list(data.values()) # type: ignore[assignment]
elif orient == "columns" or orient == "tight":
if columns is not None:
raise ValueError(f"cannot use columns parameter with orient='{orient}'")
@@ -1809,7 +1814,25 @@ def to_numpy(
return result
- def to_dict(self, orient: str = "dict", into=dict):
+ @overload
+ def to_dict(
+ self,
+ orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,
+ into: type[dict] = ...,
+ ) -> dict:
+ ...
+
+ @overload
+ def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]:
+ ...
+
+ def to_dict(
+ self,
+ orient: Literal[
+ "dict", "list", "series", "split", "tight", "records", "index"
+ ] = "dict",
+ into: type[dict] = dict,
+ ) -> dict | list[dict]:
"""
Convert the DataFrame to a dictionary.
@@ -1915,7 +1938,10 @@ def to_dict(self, orient: str = "dict", into=dict):
# GH16122
into_c = com.standardize_mapping(into)
- orient = orient.lower()
+ # error: Incompatible types in assignment (expression has type "str",
+ # variable has type "Literal['dict', 'list', 'series', 'split', 'tight',
+ # 'records', 'index']")
+ orient = orient.lower() # type: ignore[assignment]
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
@@ -2333,7 +2359,7 @@ def maybe_reorder(
return cls(mgr)
def to_records(
- self, index=True, column_dtypes=None, index_dtypes=None
+ self, index: bool = True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
@@ -2442,7 +2468,7 @@ def to_records(
formats = []
for i, v in enumerate(arrays):
- index = i
+ index_int = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
@@ -2453,13 +2479,13 @@ def to_records(
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
- if index < index_len:
+ if index_int < index_len:
dtype_mapping = index_dtypes
- name = index_names[index]
+ name = index_names[index_int]
else:
- index -= index_len
+ index_int -= index_len
dtype_mapping = column_dtypes
- name = self.columns[index]
+ name = self.columns[index_int]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
@@ -2469,8 +2495,8 @@ def to_records(
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
- elif index in dtype_mapping:
- dtype_mapping = dtype_mapping[index]
+ elif index_int in dtype_mapping:
+ dtype_mapping = dtype_mapping[index_int]
else:
dtype_mapping = None
@@ -4984,14 +5010,14 @@ def _reindex_multi(
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
- other,
- join: str = "outer",
+ other: DataFrame,
+ join: Literal["outer", "inner", "left", "right"] = "outer",
axis: Axis | None = None,
- level: Level | None = None,
+ level: Level = None,
copy: bool = True,
fill_value=None,
- method: str | None = None,
- limit=None,
+ method: FillnaOptions | None = None,
+ limit: int | None = None,
fill_axis: Axis = 0,
broadcast_axis: Axis | None = None,
) -> DataFrame:
@@ -5024,6 +5050,7 @@ def set_axis(
) -> DataFrame | None:
...
+ # error: Signature of "set_axis" incompatible with supertype "NDFrame"
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
@Appender(
"""
@@ -5064,7 +5091,9 @@ def set_axis(
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
- def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
+ def set_axis( # type: ignore[override]
+ self, labels, axis: Axis = 0, inplace: bool = False
+ ) -> DataFrame | None:
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@@ -5096,7 +5125,7 @@ def drop(
axis: Axis = ...,
index: IndexLabel = ...,
columns: IndexLabel = ...,
- level: Level | None = ...,
+ level: Level = ...,
inplace: Literal[True],
errors: IgnoreRaise = ...,
) -> None:
@@ -5110,7 +5139,7 @@ def drop(
axis: Axis = ...,
index: IndexLabel = ...,
columns: IndexLabel = ...,
- level: Level | None = ...,
+ level: Level = ...,
inplace: Literal[False] = ...,
errors: IgnoreRaise = ...,
) -> DataFrame:
@@ -5124,7 +5153,7 @@ def drop(
axis: Axis = ...,
index: IndexLabel = ...,
columns: IndexLabel = ...,
- level: Level | None = ...,
+ level: Level = ...,
inplace: bool = ...,
errors: IgnoreRaise = ...,
) -> DataFrame | None:
@@ -5139,7 +5168,7 @@ def drop( # type: ignore[override]
axis: Axis = 0,
index: IndexLabel = None,
columns: IndexLabel = None,
- level: Level | None = None,
+ level: Level = None,
inplace: bool = False,
errors: IgnoreRaise = "raise",
) -> DataFrame | None:
@@ -5300,7 +5329,7 @@ def rename(
axis: Axis | None = ...,
copy: bool = ...,
inplace: Literal[True],
- level: Level | None = ...,
+ level: Level = ...,
errors: IgnoreRaise = ...,
) -> None:
...
@@ -5315,7 +5344,7 @@ def rename(
axis: Axis | None = ...,
copy: bool = ...,
inplace: Literal[False] = ...,
- level: Level | None = ...,
+ level: Level = ...,
errors: IgnoreRaise = ...,
) -> DataFrame:
...
@@ -5330,7 +5359,7 @@ def rename(
axis: Axis | None = ...,
copy: bool = ...,
inplace: bool = ...,
- level: Level | None = ...,
+ level: Level = ...,
errors: IgnoreRaise = ...,
) -> DataFrame | None:
...
@@ -5344,7 +5373,7 @@ def rename(
axis: Axis | None = None,
copy: bool = True,
inplace: bool = False,
- level: Level | None = None,
+ level: Level = None,
errors: IgnoreRaise = "ignore",
) -> DataFrame | None:
"""
@@ -5468,128 +5497,53 @@ def rename(
@overload
def fillna(
self,
- value=...,
+ value: Hashable | Mapping | Series | DataFrame = ...,
+ *,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[False] = ...,
- limit=...,
- downcast=...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
) -> DataFrame:
...
@overload
def fillna(
self,
- value,
- method: FillnaOptions | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- value,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- *,
- method: FillnaOptions | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
+ value: Hashable | Mapping | Series | DataFrame = ...,
*,
- method: FillnaOptions | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- value,
- *,
- axis: Axis | None,
+ method: FillnaOptions | None = ...,
+ axis: Axis | None = ...,
inplace: Literal[True],
- limit=...,
- downcast=...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
) -> None:
...
@overload
def fillna(
self,
- value,
- method: FillnaOptions | None,
+ value: Hashable | Mapping | Series | DataFrame = ...,
*,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- value=...,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: bool = ...,
- limit=...,
- downcast=...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
) -> DataFrame | None:
...
+ # error: Signature of "fillna" incompatible with supertype "NDFrame"
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
@doc(NDFrame.fillna, **_shared_doc_kwargs)
- def fillna(
+ def fillna( # type: ignore[override]
self,
- value: object | ArrayLike | None = None,
+ value: Hashable | Mapping | Series | DataFrame = None,
method: FillnaOptions | None = None,
axis: Axis | None = None,
inplace: bool = False,
- limit=None,
- downcast=None,
+ limit: int | None = None,
+ downcast: dict | None = None,
) -> DataFrame | None:
return super().fillna(
value=value,
@@ -5730,10 +5684,10 @@ def _replace_columnwise(
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self,
- periods=1,
+ periods: int = 1,
freq: Frequency | None = None,
axis: Axis = 0,
- fill_value=lib.no_default,
+ fill_value: Hashable = lib.no_default,
) -> DataFrame:
axis = self._get_axis_number(axis)
@@ -6060,7 +6014,8 @@ def set_index(
@overload
def reset_index(
self,
- level: Hashable | Sequence[Hashable] | None = ...,
+ level: IndexLabel = ...,
+ *,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
@@ -6073,34 +6028,9 @@ def reset_index(
@overload
def reset_index(
self,
- level: Hashable | Sequence[Hashable] | None,
- drop: bool,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- allow_duplicates: bool | lib.NoDefault = ...,
- names: Hashable | Sequence[Hashable] = None,
- ) -> None:
- ...
-
- @overload
- def reset_index(
- self,
- *,
- drop: bool,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- allow_duplicates: bool | lib.NoDefault = ...,
- names: Hashable | Sequence[Hashable] = None,
- ) -> None:
- ...
-
- @overload
- def reset_index(
- self,
- level: Hashable | Sequence[Hashable] | None,
+ level: IndexLabel = ...,
*,
+ drop: bool = ...,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
@@ -6112,19 +6042,8 @@ def reset_index(
@overload
def reset_index(
self,
+ level: IndexLabel = ...,
*,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- allow_duplicates: bool | lib.NoDefault = ...,
- names: Hashable | Sequence[Hashable] = None,
- ) -> None:
- ...
-
- @overload
- def reset_index(
- self,
- level: Hashable | Sequence[Hashable] | None = ...,
drop: bool = ...,
inplace: bool = ...,
col_level: Hashable = ...,
@@ -6137,7 +6056,7 @@ def reset_index(
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
def reset_index(
self,
- level: Hashable | Sequence[Hashable] | None = None,
+ level: IndexLabel = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
@@ -6596,7 +6515,7 @@ def dropna(
def drop_duplicates(
self,
subset: Hashable | Sequence[Hashable] | None = None,
- keep: Literal["first"] | Literal["last"] | Literal[False] = "first",
+ keep: Literal["first", "last", False] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> DataFrame | None:
@@ -6693,7 +6612,7 @@ def drop_duplicates(
def duplicated(
self,
subset: Hashable | Sequence[Hashable] | None = None,
- keep: Literal["first"] | Literal["last"] | Literal[False] = "first",
+ keep: Literal["first", "last", False] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
@@ -6839,7 +6758,7 @@ def f(vals) -> tuple[np.ndarray, int]:
@overload # type: ignore[override]
def sort_values(
self,
- by,
+ by: IndexLabel,
*,
axis: Axis = ...,
ascending=...,
@@ -6854,7 +6773,7 @@ def sort_values(
@overload
def sort_values(
self,
- by,
+ by: IndexLabel,
*,
axis: Axis = ...,
ascending=...,
@@ -6873,9 +6792,9 @@ def sort_values(
@Appender(NDFrame.sort_values.__doc__)
def sort_values( # type: ignore[override]
self,
- by,
+ by: IndexLabel,
axis: Axis = 0,
- ascending=True,
+ ascending: bool | list[bool] | tuple[bool, ...] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
@@ -6887,9 +6806,16 @@ def sort_values( # type: ignore[override]
ascending = validate_ascending(ascending)
if not isinstance(by, list):
by = [by]
- if is_sequence(ascending) and len(by) != len(ascending):
+ # error: Argument 1 to "len" has incompatible type "Union[bool, List[bool]]";
+ # expected "Sized"
+ if is_sequence(ascending) and (
+ len(by) != len(ascending) # type: ignore[arg-type]
+ ):
+ # error: Argument 1 to "len" has incompatible type "Union[bool,
+ # List[bool]]"; expected "Sized"
raise ValueError(
- f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
+ f"Length of ascending ({len(ascending)})" # type: ignore[arg-type]
+ f" != length of by ({len(by)})"
)
if len(by) > 1:
@@ -6948,7 +6874,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool | Sequence[bool] = ...,
inplace: Literal[True],
kind: SortKind = ...,
@@ -6964,7 +6890,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool | Sequence[bool] = ...,
inplace: Literal[False] = ...,
kind: SortKind = ...,
@@ -6980,7 +6906,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool | Sequence[bool] = ...,
inplace: bool = ...,
kind: SortKind = ...,
@@ -6996,7 +6922,7 @@ def sort_index(
def sort_index( # type: ignore[override]
self,
axis: Axis = 0,
- level: Level | None = None,
+ level: IndexLabel = None,
ascending: bool | Sequence[bool] = True,
inplace: bool = False,
kind: SortKind = "quicksort",
@@ -7858,7 +7784,11 @@ def compare(
)
def combine(
- self, other: DataFrame, func, fill_value=None, overwrite: bool = True
+ self,
+ other: DataFrame,
+ func: Callable[[Series, Series], Series | Hashable],
+ fill_value=None,
+ overwrite: bool = True,
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
@@ -8020,7 +7950,11 @@ def combine(
if isinstance(new_dtype, np.dtype):
# if new_dtype is an EA Dtype, then `func` is expected to return
# the correct dtype without any additional casting
- arr = maybe_downcast_to_dtype(arr, new_dtype)
+ # error: No overload variant of "maybe_downcast_to_dtype" matches
+ # argument types "Union[Series, Hashable]", "dtype[Any]"
+ arr = maybe_downcast_to_dtype( # type: ignore[call-overload]
+ arr, new_dtype
+ )
result[col] = arr
@@ -9111,7 +9045,7 @@ def melt(
value_vars=None,
var_name=None,
value_name="value",
- col_level: Level | None = None,
+ col_level: Level = None,
ignore_index: bool = True,
) -> DataFrame:
@@ -9379,7 +9313,7 @@ def any(
axis: Axis = 0,
bool_only: bool | None = None,
skipna: bool = True,
- level: Level | None = None,
+ level: Level = None,
**kwargs,
) -> DataFrame | Series:
...
@@ -9404,7 +9338,7 @@ def apply(
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
- result_type=None,
+ result_type: Literal["expand", "reduce", "broadcast"] | None = None,
args=(),
**kwargs,
):
@@ -10471,10 +10405,11 @@ def cov(
def corrwith(
self,
- other,
+ other: DataFrame | Series,
axis: Axis = 0,
- drop=False,
- method="pearson",
+ drop: bool = False,
+ method: Literal["pearson", "kendall", "spearman"]
+ | Callable[[np.ndarray, np.ndarray], float] = "pearson",
numeric_only: bool | lib.NoDefault = lib.no_default,
) -> Series:
"""
@@ -10642,9 +10577,7 @@ def c(x):
# ----------------------------------------------------------------------
# ndarray-like stats methods
- def count(
- self, axis: Axis = 0, level: Level | None = None, numeric_only: bool = False
- ):
+ def count(self, axis: Axis = 0, level: Level = None, numeric_only: bool = False):
"""
Count non-NA cells for each column or row.
@@ -11160,13 +11093,43 @@ def f(s):
return data
+ @overload
+ def quantile(
+ self,
+ q: float = ...,
+ axis: Axis = ...,
+ numeric_only: bool | lib.NoDefault = ...,
+ interpolation: QuantileInterpolation = ...,
+ ) -> Series:
+ ...
+
+ @overload
def quantile(
self,
- q=0.5,
+ q: AnyArrayLike | Sequence[float],
+ axis: Axis = ...,
+ numeric_only: bool | lib.NoDefault = ...,
+ interpolation: QuantileInterpolation = ...,
+ ) -> Series | DataFrame:
+ ...
+
+ @overload
+ def quantile(
+ self,
+ q: float | AnyArrayLike | Sequence[float] = ...,
+ axis: Axis = ...,
+ numeric_only: bool | lib.NoDefault = ...,
+ interpolation: QuantileInterpolation = ...,
+ ) -> Series | DataFrame:
+ ...
+
+ def quantile(
+ self,
+ q: float | AnyArrayLike | Sequence[float] = 0.5,
axis: Axis = 0,
numeric_only: bool | lib.NoDefault = no_default,
- interpolation: str = "linear",
- ):
+ interpolation: QuantileInterpolation = "linear",
+ ) -> Series | DataFrame:
"""
Return values at the given quantile over requested axis.
@@ -11246,8 +11209,14 @@ def quantile(
if not is_list_like(q):
# BlockManager.quantile expects listlike, so we wrap and unwrap here
+ # error: List item 0 has incompatible type "Union[float, Union[Union[
+ # ExtensionArray, ndarray[Any, Any]], Index, Series], Sequence[float]]";
+ # expected "float"
res_df = self.quantile(
- [q], axis=axis, numeric_only=numeric_only, interpolation=interpolation
+ [q], # type: ignore[list-item]
+ axis=axis,
+ numeric_only=numeric_only,
+ interpolation=interpolation,
)
res = res_df.iloc[0]
if axis == 1 and len(self) == 0:
@@ -11277,7 +11246,11 @@ def quantile(
res = self._constructor([], index=q, columns=cols, dtype=dtype)
return res.__finalize__(self, method="quantile")
- res = data._mgr.quantile(qs=q, axis=1, interpolation=interpolation)
+ # error: Argument "qs" to "quantile" of "BlockManager" has incompatible type
+ # "Index"; expected "Float64Index"
+ res = data._mgr.quantile(
+ qs=q, axis=1, interpolation=interpolation # type: ignore[arg-type]
+ )
result = self._constructor(res)
return result.__finalize__(self, method="quantile")
@@ -11286,10 +11259,10 @@ def quantile(
def asfreq(
self,
freq: Frequency,
- method=None,
+ method: FillnaOptions | None = None,
how: str | None = None,
normalize: bool = False,
- fill_value=None,
+ fill_value: Hashable = None,
) -> DataFrame:
return super().asfreq(
freq=freq,
@@ -11303,15 +11276,15 @@ def asfreq(
def resample(
self,
rule,
- axis=0,
+ axis: Axis = 0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
loffset=None,
base: int | None = None,
- on=None,
- level=None,
+ on: Level = None,
+ level: Level = None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool | lib.NoDefault = no_default,
@@ -11426,7 +11399,7 @@ def to_period(
setattr(new_obj, axis_name, new_ax)
return new_obj
- def isin(self, values) -> DataFrame:
+ def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
@@ -11521,8 +11494,13 @@ def isin(self, values) -> DataFrame:
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
+ # error: Argument 2 to "isin" has incompatible type "Union[Sequence[Any],
+ # Mapping[Any, Any]]"; expected "Union[Union[ExtensionArray,
+ # ndarray[Any, Any]], Index, Series]"
result = self._constructor(
- algorithms.isin(self.values.ravel(), values).reshape(self.shape),
+ algorithms.isin(
+ self.values.ravel(), values # type: ignore[arg-type]
+ ).reshape(self.shape),
self.index,
self.columns,
)
@@ -11667,7 +11645,7 @@ def ffill(
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> DataFrame:
...
@@ -11678,7 +11656,7 @@ def ffill(
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> None:
...
@@ -11689,7 +11667,7 @@ def ffill(
axis: None | Axis = ...,
inplace: bool = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> DataFrame | None:
...
@@ -11700,7 +11678,7 @@ def ffill( # type: ignore[override]
axis: None | Axis = None,
inplace: bool = False,
limit: None | int = None,
- downcast=None,
+ downcast: dict | None = None,
) -> DataFrame | None:
return super().ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
@@ -11753,8 +11731,8 @@ def bfill( # type: ignore[override]
)
def clip(
self: DataFrame,
- lower=None,
- upper=None,
+ lower: float | None = None,
+ upper: float | None = None,
axis: Axis | None = None,
inplace: bool = False,
*args,
@@ -11792,10 +11770,10 @@ def where(
other=...,
*,
inplace: Literal[False] = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> DataFrame:
...
@@ -11806,10 +11784,10 @@ def where(
other=...,
*,
inplace: Literal[True],
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> None:
...
@@ -11820,10 +11798,10 @@ def where(
other=...,
*,
inplace: bool = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> DataFrame | None:
...
@@ -11837,10 +11815,10 @@ def where( # type: ignore[override]
cond,
other=lib.no_default,
inplace: bool = False,
- axis=None,
- level=None,
+ axis: Axis | None = None,
+ level: Level = None,
errors: IgnoreRaise | lib.NoDefault = "raise",
- try_cast=lib.no_default,
+ try_cast: bool | lib.NoDefault = lib.no_default,
) -> DataFrame | None:
return super().where(
cond,
@@ -11858,10 +11836,10 @@ def mask(
other=...,
*,
inplace: Literal[False] = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> DataFrame:
...
@@ -11872,10 +11850,10 @@ def mask(
other=...,
*,
inplace: Literal[True],
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> None:
...
@@ -11886,10 +11864,10 @@ def mask(
other=...,
*,
inplace: bool = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> DataFrame | None:
...
@@ -11903,10 +11881,10 @@ def mask( # type: ignore[override]
cond,
other=np.nan,
inplace: bool = False,
- axis=None,
- level=None,
+ axis: Axis | None = None,
+ level: Level = None,
errors: IgnoreRaise | lib.NoDefault = "raise",
- try_cast=lib.no_default,
+ try_cast: bool | lib.NoDefault = lib.no_default,
) -> DataFrame | None:
return super().mask(
cond,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8096b57168d8c..d9264e8a18f2e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -49,8 +49,10 @@
DtypeArg,
DtypeObj,
FilePath,
+ FillnaOptions,
FloatFormatType,
FormattersType,
+ Frequency,
IgnoreRaise,
IndexKeyFunc,
IndexLabel,
@@ -1741,7 +1743,7 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
raise ValueError(msg)
@final
- def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
+ def _get_label_or_level_values(self, key: Level, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
@@ -3191,7 +3193,7 @@ def to_latex(
multicolumn: bool_t | None = ...,
multicolumn_format: str | None = ...,
multirow: bool_t | None = ...,
- caption: str | None = ...,
+ caption: str | tuple[str, str] | None = ...,
label: str | None = ...,
position: str | None = ...,
) -> str:
@@ -3219,7 +3221,7 @@ def to_latex(
multicolumn: bool_t | None = ...,
multicolumn_format: str | None = ...,
multirow: bool_t | None = ...,
- caption: str | None = ...,
+ caption: str | tuple[str, str] | None = ...,
label: str | None = ...,
position: str | None = ...,
) -> None:
@@ -3248,7 +3250,7 @@ def to_latex(
multicolumn: bool_t | None = None,
multicolumn_format: str | None = None,
multirow: bool_t | None = None,
- caption: str | None = None,
+ caption: str | tuple[str, str] | None = None,
label: str | None = None,
position: str | None = None,
) -> str | None:
@@ -4895,7 +4897,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: Literal[True],
kind: SortKind = ...,
@@ -4911,7 +4913,7 @@ def sort_index(
self: NDFrameT,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: Literal[False] = ...,
kind: SortKind = ...,
@@ -4927,7 +4929,7 @@ def sort_index(
self: NDFrameT,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: bool_t = ...,
kind: SortKind = ...,
@@ -4941,7 +4943,7 @@ def sort_index(
def sort_index(
self: NDFrameT,
axis: Axis = 0,
- level: Level | None = None,
+ level: IndexLabel = None,
ascending: bool_t | Sequence[bool_t] = True,
inplace: bool_t = False,
kind: SortKind = "quicksort",
@@ -6584,15 +6586,54 @@ def convert_dtypes(
# ----------------------------------------------------------------------
# Filling NA's
+ @overload
+ def fillna(
+ self: NDFrameT,
+ value: Hashable | Mapping | Series | DataFrame = ...,
+ *,
+ method: FillnaOptions | None = ...,
+ axis: Axis | None = ...,
+ inplace: Literal[False] = ...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
+ ) -> NDFrameT:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value: Hashable | Mapping | Series | DataFrame = ...,
+ *,
+ method: FillnaOptions | None = ...,
+ axis: Axis | None = ...,
+ inplace: Literal[True],
+ limit: int | None = ...,
+ downcast: dict | None = ...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self: NDFrameT,
+ value: Hashable | Mapping | Series | DataFrame = ...,
+ *,
+ method: FillnaOptions | None = ...,
+ axis: Axis | None = ...,
+ inplace: bool_t = ...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
+ ) -> NDFrameT | None:
+ ...
+
@doc(**_shared_doc_kwargs)
def fillna(
self: NDFrameT,
- value=None,
- method=None,
- axis=None,
+ value: Hashable | Mapping | Series | DataFrame = None,
+ method: FillnaOptions | None = None,
+ axis: Axis | None = None,
inplace: bool_t = False,
- limit=None,
- downcast=None,
+ limit: int | None = None,
+ downcast: dict | None = None,
) -> NDFrameT | None:
"""
Fill NA/NaN values using the specified method.
@@ -6769,7 +6810,13 @@ def fillna(
for k, v in value.items():
if k not in result:
continue
- downcast_k = downcast if not is_dict else downcast.get(k)
+ # error: Item "None" of "Optional[Dict[Any, Any]]" has no
+ # attribute "get"
+ downcast_k = (
+ downcast
+ if not is_dict
+ else downcast.get(k) # type: ignore[union-attr]
+ )
result.loc[:, k] = result[k].fillna(
v, limit=limit, downcast=downcast_k
)
@@ -6780,7 +6827,10 @@ def fillna(
result = self.T.fillna(value=value, limit=limit).T
- new_data = result
+ # error: Incompatible types in assignment (expression has type
+ # "NDFrameT", variable has type "Union[ArrayManager,
+ # SingleArrayManager, BlockManager, SingleBlockManager]")
+ new_data = result # type: ignore[assignment]
else:
new_data = self._mgr.fillna(
@@ -6805,7 +6855,7 @@ def ffill(
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> NDFrameT:
...
@@ -6816,7 +6866,7 @@ def ffill(
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> None:
...
@@ -6827,7 +6877,7 @@ def ffill(
axis: None | Axis = ...,
inplace: bool_t = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> NDFrameT | None:
...
@@ -6838,7 +6888,7 @@ def ffill(
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
- downcast=None,
+ downcast: dict | None = None,
) -> NDFrameT | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
@@ -6861,7 +6911,7 @@ def bfill(
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> NDFrameT:
...
@@ -6872,7 +6922,7 @@ def bfill(
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> None:
...
@@ -6883,7 +6933,7 @@ def bfill(
axis: None | Axis = ...,
inplace: bool_t = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> NDFrameT | None:
...
@@ -6894,7 +6944,7 @@ def bfill(
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
- downcast=None,
+ downcast: dict | None = None,
) -> NDFrameT | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
@@ -7983,11 +8033,11 @@ def clip(
@doc(**_shared_doc_kwargs)
def asfreq(
self: NDFrameT,
- freq,
- method=None,
+ freq: Frequency,
+ method: FillnaOptions | None = None,
how: str | None = None,
normalize: bool_t = False,
- fill_value=None,
+ fill_value: Hashable = None,
) -> NDFrameT:
"""
Convert time series to specified frequency.
@@ -8297,15 +8347,15 @@ def between_time(
def resample(
self,
rule,
- axis=0,
+ axis: Axis = 0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
loffset=None,
base: int | None = None,
- on=None,
- level=None,
+ on: Level = None,
+ level: Level = None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool_t | lib.NoDefault = lib.no_default,
@@ -9136,18 +9186,18 @@ def compare(
@doc(**_shared_doc_kwargs)
def align(
- self,
- other,
- join="outer",
- axis=None,
- level=None,
- copy=True,
- fill_value=None,
- method=None,
- limit=None,
- fill_axis=0,
- broadcast_axis=None,
- ):
+ self: NDFrameT,
+ other: NDFrameT,
+ join: Literal["outer", "inner", "left", "right"] = "outer",
+ axis: Axis | None = None,
+ level: Level = None,
+ copy: bool_t = True,
+ fill_value: Hashable = None,
+ method: FillnaOptions | None = None,
+ limit: int | None = None,
+ fill_axis: Axis = 0,
+ broadcast_axis: Axis | None = None,
+ ) -> NDFrameT:
"""
Align two objects on their axes with the specified join method.
@@ -9614,8 +9664,8 @@ def where(
other=...,
*,
inplace: Literal[False] = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT:
@@ -9628,8 +9678,8 @@ def where(
other=...,
*,
inplace: Literal[True],
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
try_cast: bool_t | lib.NoDefault = ...,
) -> None:
@@ -9642,8 +9692,8 @@ def where(
other=...,
*,
inplace: bool_t = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT | None:
@@ -9665,8 +9715,8 @@ def where(
cond,
other=np.nan,
inplace: bool_t = False,
- axis=None,
- level=None,
+ axis: Axis | None = None,
+ level: Level = None,
errors: IgnoreRaise | lib.NoDefault = "raise",
try_cast: bool_t | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
@@ -9835,8 +9885,8 @@ def mask(
other=...,
*,
inplace: Literal[False] = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT:
@@ -9849,8 +9899,8 @@ def mask(
other=...,
*,
inplace: Literal[True],
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
try_cast: bool_t | lib.NoDefault = ...,
) -> None:
@@ -9863,8 +9913,8 @@ def mask(
other=...,
*,
inplace: bool_t = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT | None:
@@ -9887,8 +9937,8 @@ def mask(
cond,
other=np.nan,
inplace: bool_t = False,
- axis=None,
- level=None,
+ axis: Axis | None = None,
+ level: Level = None,
errors: IgnoreRaise | lib.NoDefault = "raise",
try_cast: bool_t | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
@@ -9918,7 +9968,11 @@ def mask(
@doc(klass=_shared_doc_kwargs["klass"])
def shift(
- self: NDFrameT, periods=1, freq=None, axis=0, fill_value=None
+ self: NDFrameT,
+ periods: int = 1,
+ freq=None,
+ axis: Axis = 0,
+ fill_value: Hashable = None,
) -> NDFrameT:
"""
Shift index by desired number of periods with an optional time `freq`.
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 138474e21fb57..44b4fa810deee 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2403,7 +2403,9 @@ def size(self) -> DataFrame | Series:
result = self._obj_1d_constructor(result)
if not self.as_index:
- result = result.rename("size").reset_index()
+ # error: Incompatible types in assignment (expression has
+ # type "DataFrame", variable has type "Series")
+ result = result.rename("size").reset_index() # type: ignore[assignment]
return self._reindex_output(result, fill_value=0)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 7e2a9184f04d9..1383f850ab043 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6427,7 +6427,7 @@ def _transform_index(self, func, *, level=None) -> Index:
items = [func(x) for x in self]
return Index(items, name=self.name, tupleize_cols=False)
- def isin(self, values, level=None) -> np.ndarray:
+ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
"""
Return a boolean array where the index values are in `values`.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 20f0ecd06fbd1..b1ad3ab175d1b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -13,6 +13,7 @@
Hashable,
Iterable,
Literal,
+ Mapping,
Sequence,
Union,
cast,
@@ -41,6 +42,7 @@
DtypeObj,
FilePath,
FillnaOptions,
+ Frequency,
IgnoreRaise,
IndexKeyFunc,
IndexLabel,
@@ -162,7 +164,6 @@
import pandas.plotting
if TYPE_CHECKING:
-
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
@@ -743,7 +744,7 @@ def array(self) -> ExtensionArray:
return self._mgr.array_values()
# ops
- def ravel(self, order="C"):
+ def ravel(self, order: str = "C") -> np.ndarray:
"""
Return the flattened underlying data as an ndarray.
@@ -911,7 +912,9 @@ def axes(self) -> list[Index]:
# Indexing Methods
@Appender(NDFrame.take.__doc__)
- def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:
+ def take(
+ self, indices, axis: Axis = 0, is_copy: bool | None = None, **kwargs
+ ) -> Series:
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
@@ -1319,7 +1322,7 @@ def _maybe_update_cacher(
def _is_mixed_type(self):
return False
- def repeat(self, repeats, axis=None) -> Series:
+ def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series:
"""
Repeat elements of a Series.
@@ -1380,9 +1383,21 @@ def repeat(self, repeats, axis=None) -> Series:
@overload
def reset_index(
self,
- level: Level = ...,
+ level: IndexLabel = ...,
*,
- drop: bool = ...,
+ drop: Literal[False] = ...,
+ name: Level = ...,
+ inplace: Literal[False] = ...,
+ allow_duplicates: bool = ...,
+ ) -> DataFrame:
+ ...
+
+ @overload
+ def reset_index(
+ self,
+ level: IndexLabel = ...,
+ *,
+ drop: Literal[True],
name: Level = ...,
inplace: Literal[False] = ...,
allow_duplicates: bool = ...,
@@ -1392,7 +1407,7 @@ def reset_index(
@overload
def reset_index(
self,
- level: Level = ...,
+ level: IndexLabel = ...,
*,
drop: bool = ...,
name: Level = ...,
@@ -1404,12 +1419,12 @@ def reset_index(
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
def reset_index(
self,
- level: Level = None,
+ level: IndexLabel = None,
drop: bool = False,
name: Level = lib.no_default,
inplace: bool = False,
allow_duplicates: bool = False,
- ) -> Series | None:
+ ) -> DataFrame | Series | None:
"""
Generate a new DataFrame or Series with the index reset.
@@ -1554,9 +1569,7 @@ def reset_index(
name = self.name
df = self.to_frame(name)
- # error: Incompatible return value type (got "DataFrame", expected
- # "Optional[Series]")
- return df.reset_index( # type: ignore[return-value]
+ return df.reset_index(
level=level, drop=drop, allow_duplicates=allow_duplicates
)
return None
@@ -1818,7 +1831,7 @@ def keys(self) -> Index:
"""
return self.index
- def to_dict(self, into=dict):
+ def to_dict(self, into: type[dict] = dict) -> dict:
"""
Convert Series to {label -> value} dict or dict-like object.
@@ -2003,8 +2016,8 @@ def _set_name(self, name, inplace=False) -> Series:
def groupby(
self,
by=None,
- axis=0,
- level=None,
+ axis: Axis = 0,
+ level: Level = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool | lib.NoDefault = no_default,
@@ -2047,8 +2060,7 @@ def groupby(
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
-
- def count(self, level=None):
+ def count(self, level: Level = None):
"""
Return number of non-NA/null observations in the Series.
@@ -2200,23 +2212,30 @@ def unique(self) -> ArrayLike:
return super().unique()
@overload
- def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series:
- ...
-
- @overload
- def drop_duplicates(self, keep, inplace: Literal[True]) -> None:
+ def drop_duplicates(
+ self,
+ keep: Literal["first", "last", False] = ...,
+ *,
+ inplace: Literal[False] = ...,
+ ) -> Series:
...
@overload
- def drop_duplicates(self, *, inplace: Literal[True]) -> None:
+ def drop_duplicates(
+ self, keep: Literal["first", "last", False] = ..., *, inplace: Literal[True]
+ ) -> None:
...
@overload
- def drop_duplicates(self, keep=..., inplace: bool = ...) -> Series | None:
+ def drop_duplicates(
+ self, keep: Literal["first", "last", False] = ..., *, inplace: bool = ...
+ ) -> Series | None:
...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
- def drop_duplicates(self, keep="first", inplace=False) -> Series | None:
+ def drop_duplicates(
+ self, keep: Literal["first", "last", False] = "first", inplace=False
+ ) -> Series | None:
"""
Return Series with duplicate values removed.
@@ -2300,7 +2319,7 @@ def drop_duplicates(self, keep="first", inplace=False) -> Series | None:
else:
return result
- def duplicated(self, keep="first") -> Series:
+ def duplicated(self, keep: Literal["first", "last", False] = "first") -> Series:
"""
Indicate duplicate Series values.
@@ -2380,7 +2399,7 @@ def duplicated(self, keep="first") -> Series:
result = self._constructor(res, index=self.index)
return result.__finalize__(self, method="duplicated")
- def idxmin(self, axis=0, skipna=True, *args, **kwargs):
+ def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable:
"""
Return the row label of the minimum value.
@@ -2448,7 +2467,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs):
return np.nan
return self.index[i]
- def idxmax(self, axis=0, skipna=True, *args, **kwargs):
+ def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable:
"""
Return the row label of the maximum value.
@@ -2517,7 +2536,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs):
return np.nan
return self.index[i]
- def round(self, decimals=0, *args, **kwargs) -> Series:
+ def round(self, decimals: int = 0, *args, **kwargs) -> Series:
"""
Round each value in a Series to the given number of decimals.
@@ -2642,7 +2661,13 @@ def quantile(
# scalar
return result.iloc[0]
- def corr(self, other, method="pearson", min_periods=None) -> float:
+ def corr(
+ self,
+ other: Series,
+ method: Literal["pearson", "kendall", "spearman"]
+ | Callable[[np.ndarray, np.ndarray], float] = "pearson",
+ min_periods: int | None = None,
+ ) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
@@ -2850,7 +2875,7 @@ def diff(self, periods: int = 1) -> Series:
self, method="diff"
)
- def autocorr(self, lag=1) -> float:
+ def autocorr(self, lag: int = 1) -> float:
"""
Compute the lag-N autocorrelation.
@@ -2895,7 +2920,7 @@ def autocorr(self, lag=1) -> float:
"""
return self.corr(self.shift(lag))
- def dot(self, other):
+ def dot(self, other: AnyArrayLike) -> Series | np.ndarray:
"""
Compute the dot product between the Series and the columns of other.
@@ -3253,7 +3278,12 @@ def compare(
result_names=result_names,
)
- def combine(self, other, func, fill_value=None) -> Series:
+ def combine(
+ self,
+ other: Series | Hashable,
+ func: Callable[[Hashable, Hashable], Hashable],
+ fill_value: Hashable = None,
+ ) -> Series:
"""
Combine the Series with a Series or scalar according to `func`.
@@ -3400,7 +3430,7 @@ def combine_first(self, other) -> Series:
return this.where(notna(this), other)
- def update(self, other) -> None:
+ def update(self, other: Series | Sequence | Mapping) -> None:
"""
Modify Series in place using values from passed Series.
@@ -3724,7 +3754,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool | Sequence[bool] = ...,
inplace: Literal[True],
kind: SortKind = ...,
@@ -3740,7 +3770,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool | Sequence[bool] = ...,
inplace: Literal[False] = ...,
kind: SortKind = ...,
@@ -3756,7 +3786,7 @@ def sort_index(
self,
*,
axis: Axis = ...,
- level: Level | None = ...,
+ level: IndexLabel = ...,
ascending: bool | Sequence[bool] = ...,
inplace: bool = ...,
kind: SortKind = ...,
@@ -3772,7 +3802,7 @@ def sort_index(
def sort_index( # type: ignore[override]
self,
axis: Axis = 0,
- level: Level | None = None,
+ level: IndexLabel = None,
ascending: bool | Sequence[bool] = True,
inplace: bool = False,
kind: SortKind = "quicksort",
@@ -3928,7 +3958,12 @@ def sort_index( # type: ignore[override]
key=key,
)
- def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
+ def argsort(
+ self,
+ axis: Axis = 0,
+ kind: SortKind = "quicksort",
+ order: None = None,
+ ) -> Series:
"""
Return the integer indices that would sort the Series values.
@@ -3968,7 +4003,9 @@ def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp)
return res.__finalize__(self, method="argsort")
- def nlargest(self, n=5, keep="first") -> Series:
+ def nlargest(
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
+ ) -> Series:
"""
Return the largest `n` elements.
@@ -4223,7 +4260,7 @@ def nsmallest(self, n: int = 5, keep: str = "first") -> Series:
dtype: object"""
),
)
- def swaplevel(self, i=-2, j=-1, copy=True) -> Series:
+ def swaplevel(self, i: Level = -2, j: Level = -1, copy: bool = True) -> Series:
"""
Swap levels i and j in a :class:`MultiIndex`.
@@ -4248,7 +4285,7 @@ def swaplevel(self, i=-2, j=-1, copy=True) -> Series:
self, method="swaplevel"
)
- def reorder_levels(self, order) -> Series:
+ def reorder_levels(self, order: Sequence[Level]) -> Series:
"""
Rearrange index levels using input order.
@@ -4341,7 +4378,7 @@ def explode(self, ignore_index: bool = False) -> Series:
return self._constructor(values, index=index, name=self.name)
- def unstack(self, level=-1, fill_value=None) -> DataFrame:
+ def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame:
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
@@ -4390,7 +4427,11 @@ def unstack(self, level=-1, fill_value=None) -> DataFrame:
# ----------------------------------------------------------------------
# function application
- def map(self, arg, na_action=None) -> Series:
+ def map(
+ self,
+ arg: Callable | Mapping | Series,
+ na_action: Literal["ignore"] | None = None,
+ ) -> Series:
"""
Map values of Series according to an input mapping or function.
@@ -4522,7 +4563,7 @@ def _gotitem(self, key, ndim, subset=None) -> Series:
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
)
- def aggregate(self, func=None, axis=0, *args, **kwargs):
+ def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
@@ -4776,17 +4817,17 @@ def _needs_reindex_multi(self, axes, method, level) -> bool:
)
def align(
self,
- other,
- join="outer",
- axis=None,
- level=None,
- copy=True,
- fill_value=None,
- method=None,
- limit=None,
- fill_axis=0,
- broadcast_axis=None,
- ):
+ other: Series,
+ join: Literal["outer", "inner", "left", "right"] = "outer",
+ axis: Axis | None = None,
+ level: Level = None,
+ copy: bool = True,
+ fill_value: Hashable = None,
+ method: FillnaOptions | None = None,
+ limit: int | None = None,
+ fill_axis: Axis = 0,
+ broadcast_axis: Axis | None = None,
+ ) -> Series:
return super().align(
other,
join=join,
@@ -5155,129 +5196,53 @@ def drop( # type: ignore[override]
@overload
def fillna(
self,
- value=...,
+ value: Hashable | Mapping | Series | DataFrame = ...,
+ *,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[False] = ...,
- limit=...,
- downcast=...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
) -> Series:
...
@overload
def fillna(
self,
- value,
- method: FillnaOptions | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
+ value: Hashable | Mapping | Series | DataFrame = ...,
*,
+ method: FillnaOptions | None = ...,
+ axis: Axis | None = ...,
inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- value,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- *,
- method: FillnaOptions | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- *,
- method: FillnaOptions | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- value,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
) -> None:
...
@overload
def fillna(
self,
- value,
- method: FillnaOptions | None,
+ value: Hashable | Mapping | Series | DataFrame = ...,
*,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
- @overload
- def fillna(
- self,
- value=...,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: bool = ...,
- limit=...,
- downcast=...,
+ limit: int | None = ...,
+ downcast: dict | None = ...,
) -> Series | None:
...
- # error: Cannot determine type of 'fillna'
+ # error: Signature of "fillna" incompatible with supertype "NDFrame"
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
- @doc(NDFrame.fillna, **_shared_doc_kwargs) # type: ignore[has-type]
- def fillna(
+ @doc(NDFrame.fillna, **_shared_doc_kwargs)
+ def fillna( # type: ignore[override]
self,
- value: object | ArrayLike | None = None,
+ value: Hashable | Mapping | Series | DataFrame = None,
method: FillnaOptions | None = None,
- axis=None,
- inplace=False,
- limit=None,
- downcast=None,
+ axis: Axis | None = None,
+ inplace: bool = False,
+ limit: int | None = None,
+ downcast: dict | None = None,
) -> Series | None:
return super().fillna(
value=value,
@@ -5324,7 +5289,7 @@ def replace(
*,
inplace: Literal[False] = ...,
limit: int | None = ...,
- regex=...,
+ regex: bool = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> Series:
...
@@ -5337,7 +5302,7 @@ def replace(
*,
inplace: Literal[True],
limit: int | None = ...,
- regex=...,
+ regex: bool = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> None:
...
@@ -5358,7 +5323,7 @@ def replace( # type: ignore[override]
value=lib.no_default,
inplace: bool = False,
limit: int | None = None,
- regex=False,
+ regex: bool = False,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
) -> Series | None:
return super().replace(
@@ -5410,7 +5375,9 @@ def _replace_single(self, to_replace, method: str, inplace: bool, limit):
# error: Cannot determine type of 'shift'
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type]
- def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:
+ def shift(
+ self, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None
+ ) -> Series:
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
@@ -5547,7 +5514,12 @@ def isin(self, values) -> Series:
self, method="isin"
)
- def between(self, left, right, inclusive="both") -> Series:
+ def between(
+ self,
+ left,
+ right,
+ inclusive: Literal["both", "neither", "left", "right"] = "both",
+ ) -> Series:
"""
Return boolean Series equivalent to left <= series <= right.
@@ -5615,7 +5587,9 @@ def between(self, left, right, inclusive="both") -> Series:
3 False
dtype: bool
"""
- if inclusive is True or inclusive is False:
+ # error: Non-overlapping identity check (left operand type: "Literal['both',
+ # 'neither', 'left', 'right']", right operand type: "Literal[False]")
+ if inclusive is True or inclusive is False: # type: ignore[comparison-overlap]
warnings.warn(
"Boolean inputs to the `inclusive` argument are deprecated in "
"favour of `both` or `neither`.",
@@ -5812,11 +5786,11 @@ def dropna(
@doc(NDFrame.asfreq, **_shared_doc_kwargs) # type: ignore[has-type]
def asfreq(
self,
- freq,
- method=None,
+ freq: Frequency,
+ method: FillnaOptions | None = None,
how: str | None = None,
normalize: bool = False,
- fill_value=None,
+ fill_value: Hashable = None,
) -> Series:
return super().asfreq(
freq=freq,
@@ -5831,15 +5805,15 @@ def asfreq(
def resample(
self,
rule,
- axis=0,
+ axis: Axis = 0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
loffset=None,
base: int | None = None,
- on=None,
- level=None,
+ on: Level = None,
+ level: Level = None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool | lib.NoDefault = no_default,
@@ -5860,7 +5834,12 @@ def resample(
group_keys=group_keys,
)
- def to_timestamp(self, freq=None, how="start", copy=True) -> Series:
+ def to_timestamp(
+ self,
+ freq=None,
+ how: Literal["s", "e", "start", "end"] = "start",
+ copy: bool = True,
+ ) -> Series:
"""
Cast to DatetimeIndex of Timestamps, at *beginning* of period.
@@ -5889,7 +5868,7 @@ def to_timestamp(self, freq=None, how="start", copy=True) -> Series:
self, method="to_timestamp"
)
- def to_period(self, freq=None, copy=True) -> Series:
+ def to_period(self, freq: str | None = None, copy: bool = True) -> Series:
"""
Convert Series from DatetimeIndex to PeriodIndex.
@@ -5923,7 +5902,7 @@ def ffill(
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> Series:
...
@@ -5934,7 +5913,7 @@ def ffill(
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> None:
...
@@ -5945,7 +5924,7 @@ def ffill(
axis: None | Axis = ...,
inplace: bool = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> Series | None:
...
@@ -5956,7 +5935,7 @@ def ffill( # type: ignore[override]
axis: None | Axis = None,
inplace: bool = False,
limit: None | int = None,
- downcast=None,
+ downcast: dict | None = None,
) -> Series | None:
return super().ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
@@ -5967,7 +5946,7 @@ def bfill(
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> Series:
...
@@ -5978,7 +5957,7 @@ def bfill(
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> None:
...
@@ -5989,7 +5968,7 @@ def bfill(
axis: None | Axis = ...,
inplace: bool = ...,
limit: None | int = ...,
- downcast=...,
+ downcast: dict | None = ...,
) -> Series | None:
...
@@ -6000,7 +5979,7 @@ def bfill( # type: ignore[override]
axis: None | Axis = None,
inplace: bool = False,
limit: None | int = None,
- downcast=None,
+ downcast: dict | None = None,
) -> Series | None:
return super().bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
@@ -6048,10 +6027,10 @@ def where(
other=...,
*,
inplace: Literal[False] = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> Series:
...
@@ -6062,10 +6041,10 @@ def where(
other=...,
*,
inplace: Literal[True],
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> None:
...
@@ -6076,10 +6055,10 @@ def where(
other=...,
*,
inplace: bool = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> Series | None:
...
@@ -6093,10 +6072,10 @@ def where( # type: ignore[override]
cond,
other=lib.no_default,
inplace: bool = False,
- axis=None,
- level=None,
+ axis: Axis | None = None,
+ level: Level = None,
errors: IgnoreRaise | lib.NoDefault = lib.no_default,
- try_cast=lib.no_default,
+ try_cast: bool | lib.NoDefault = lib.no_default,
) -> Series | None:
return super().where(
cond,
@@ -6114,10 +6093,10 @@ def mask(
other=...,
*,
inplace: Literal[False] = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> Series:
...
@@ -6128,10 +6107,10 @@ def mask(
other=...,
*,
inplace: Literal[True],
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> None:
...
@@ -6142,10 +6121,10 @@ def mask(
other=...,
*,
inplace: bool = ...,
- axis=...,
- level=...,
+ axis: Axis | None = ...,
+ level: Level = ...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool | lib.NoDefault = ...,
) -> Series | None:
...
@@ -6159,10 +6138,10 @@ def mask( # type: ignore[override]
cond,
other=np.nan,
inplace: bool = False,
- axis=None,
- level=None,
+ axis: Axis | None = None,
+ level: Level = None,
errors: IgnoreRaise | lib.NoDefault = lib.no_default,
- try_cast=lib.no_default,
+ try_cast: bool | lib.NoDefault = lib.no_default,
) -> Series | None:
return super().mask(
cond,
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 2fda6d239d85b..86ff0d569d2a3 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -358,7 +358,7 @@
def read_excel(
io,
# sheet name is str or int -> DataFrame
- sheet_name: str | int,
+ sheet_name: str | int = ...,
header: int | Sequence[int] | None = ...,
names: list[str] | None = ...,
index_col: int | Sequence[int] | None = ...,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 98219ef5eea36..27094fff5f812 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1036,7 +1036,7 @@ def to_latex(
multicolumn: bool = False,
multicolumn_format: str | None = None,
multirow: bool = False,
- caption: str | None = None,
+ caption: str | tuple[str, str] | None = None,
label: str | None = None,
position: str | None = None,
) -> str | None:
| contains also changes in generic/frame.py but only to keep them consistent with series.py
edit: and added annotations for frame.py | https://api.github.com/repos/pandas-dev/pandas/pulls/47926 | 2022-08-02T03:56:15Z | 2022-08-15T23:22:45Z | 2022-08-15T23:22:45Z | 2022-09-10T01:38:38Z |
DOC: Move whatsnew for indexing regression | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 89487bfde94a5..4fa64a67b61d1 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -27,6 +27,7 @@ Bug fixes
- The :class:`errors.FutureWarning` raised when passing arguments (other than ``filepath_or_buffer``) as positional in :func:`read_csv` is now raised at the correct stacklevel (:issue:`47385`)
- Bug in :meth:`DataFrame.to_sql` when ``method`` was a ``callable`` that did not return an ``int`` and would raise a ``TypeError`` (:issue:`46891`)
- Bug in :func:`read_xml` when reading XML files with Chinese character tags and would raise ``XMLSyntaxError`` (:issue:`47902`)
+- Bug in :meth:`loc.__getitem__` with a list of keys causing an internal inconsistency that could lead to a disconnect between ``frame.at[x, y]`` vs ``frame[y].loc[x]`` (:issue:`22372`)
.. ---------------------------------------------------------------------------
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b919701475bd4..bdf811f6a8f6a 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -883,7 +883,6 @@ Interval
Indexing
^^^^^^^^
-- Bug in :meth:`loc.__getitem__` with a list of keys causing an internal inconsistency that could lead to a disconnect between ``frame.at[x, y]`` vs ``frame[y].loc[x]`` (:issue:`22372`)
- Bug in :meth:`DataFrame.iloc` where indexing a single row on a :class:`DataFrame` with a single ExtensionDtype column gave a copy instead of a view on the underlying data (:issue:`45241`)
- Bug in :meth:`DataFrame.__getitem__` returning copy when :class:`DataFrame` has duplicated columns even if a unique column is selected (:issue:`45316`, :issue:`41062`)
- Bug in :meth:`Series.align` does not create :class:`MultiIndex` with union of levels when both MultiIndexes intersections are identical (:issue:`45224`)
| xref #47921, https://github.com/pandas-dev/pandas/issues/47867#issuecomment-1198412271
Moves the whatsnew
| https://api.github.com/repos/pandas-dev/pandas/pulls/47924 | 2022-08-01T22:22:18Z | 2022-08-03T21:06:45Z | 2022-08-03T21:06:45Z | 2022-08-03T21:40:48Z |
DOC: Add pandas-stubs and potential 2.0 API breaks to 1.5 whatsnew | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 502e37705abfb..2576545e538c4 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -14,6 +14,16 @@ including other versions of pandas.
Enhancements
~~~~~~~~~~~~
+.. _whatsnew_150.enhancements.pandas-stubs:
+
+``pandas-stubs``
+^^^^^^^^^^^^^^^^
+
+The ``pandas-stubs`` library is now supported by the pandas development team, providing type stubs for the pandas API. Please visit
+https://github.com/pandas-dev/pandas-stubs for more information.
+
+We thank VirtusLab and Microsoft for their initial, significant contributions to ``pandas-stubs``
+
.. _whatsnew_150.enhancements.dataframe_interchange:
DataFrame interchange protocol implementation
@@ -544,6 +554,14 @@ Other API changes
Deprecations
~~~~~~~~~~~~
+.. warning::
+
+ In the next major version release, 2.0, several larger API changes are being considered without a formal deprecation such as
+ making the standard library `zoneinfo <https://docs.python.org/3/library/zoneinfo.html>`_ the default timezone implementation instead of ``pytz``,
+ having the :class:`Index` support all data types instead of having multiple subclasses (:class:`CategoricalIndex`, :class:`Int64Index`, etc.), and more.
+ The changes under consideration are logged in `this Github issue <https://github.com/pandas-dev/pandas/issues/44823>`_, and any
+ feedback or concerns are welcome.
+
.. _whatsnew_150.deprecations.int_slicing_series:
Label-based integer slicing on a Series with an Int64Index or RangeIndex
| - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
* Added note about `pandas-stubs` library
* Added note about potential 2.0 API breaks: https://github.com/pandas-dev/pandas/issues/44823 | https://api.github.com/repos/pandas-dev/pandas/pulls/47922 | 2022-08-01T22:06:46Z | 2022-08-09T16:34:02Z | 2022-08-09T16:34:02Z | 2022-08-09T16:34:06Z |
Backport PR #45287 on branch 1.4.x (BUG: frame[x].loc[y] inconsistent with frame.at[x, y]) (#45287) | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 6bd7378e05404..2b3223e0ff768 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -26,6 +26,7 @@ Bug fixes
~~~~~~~~~
- The :class:`errors.FutureWarning` raised when passing arguments (other than ``filepath_or_buffer``) as positional in :func:`read_csv` is now raised at the correct stacklevel (:issue:`47385`)
- Bug in :meth:`DataFrame.to_sql` when ``method`` was a ``callable`` that did not return an ``int`` and would raise a ``TypeError`` (:issue:`46891`)
+- Bug in :meth:`loc.__getitem__` with a list of keys causing an internal inconsistency that could lead to a disconnect between ``frame.at[x, y]`` vs ``frame[y].loc[x]`` (:issue:`22372`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 42688fa3e9d5b..f61a64a22cdac 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -671,9 +671,6 @@ def reindex_indexer(
result.axes[axis] = new_axis
return result
- if consolidate:
- self._consolidate_inplace()
-
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._validate_can_reindex(indexer)
@@ -1681,6 +1678,10 @@ def _consolidate_check(self) -> None:
self._known_consolidated = True
def _consolidate_inplace(self) -> None:
+ # In general, _consolidate_inplace should only be called via
+ # DataFrame._consolidate_inplace, otherwise we will fail to invalidate
+ # the DataFrame's _item_cache. The exception is for newly-created
+ # BlockManager objects not yet attached to a DataFrame.
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
diff --git a/pandas/tests/indexing/test_at.py b/pandas/tests/indexing/test_at.py
index d6be817ab6f77..a2f02378d061a 100644
--- a/pandas/tests/indexing/test_at.py
+++ b/pandas/tests/indexing/test_at.py
@@ -48,6 +48,29 @@ def test_selection_methods_of_assigned_col():
class TestAtSetItem:
+ def test_at_setitem_item_cache_cleared(self):
+ # GH#22372 Note the multi-step construction is necessary to trigger
+ # the original bug. pandas/issues/22372#issuecomment-413345309
+ df = DataFrame(index=[0])
+ df["x"] = 1
+ df["cost"] = 2
+
+ # accessing df["cost"] adds "cost" to the _item_cache
+ df["cost"]
+
+ # This loc[[0]] lookup used to call _consolidate_inplace at the
+ # BlockManager level, which failed to clear the _item_cache
+ df.loc[[0]]
+
+ df.at[0, "x"] = 4
+ df.at[0, "cost"] = 789
+
+ expected = DataFrame({"x": [4], "cost": 789}, index=[0])
+ tm.assert_frame_equal(df, expected)
+
+ # And in particular, check that the _item_cache has updated correctly.
+ tm.assert_series_equal(df["cost"], expected["cost"])
+
def test_at_setitem_mixed_index_assignment(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 26e46e9f38b9b..83df57f922c9c 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -718,7 +718,10 @@ def test_reindex_items(self):
mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2")
reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
- assert reindexed.nblocks == 2
+ # reindex_axis does not consolidate_inplace, as that risks failing to
+ # invalidate _item_cache
+ assert not reindexed.is_consolidated()
+
tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"]))
tm.assert_almost_equal(
mgr.iget(6).internal_values(), reindexed.iget(0).internal_values()
| Backport PR #45287, see https://github.com/pandas-dev/pandas/issues/47867#issuecomment-1198412271
Lets see if this works | https://api.github.com/repos/pandas-dev/pandas/pulls/47921 | 2022-08-01T21:33:37Z | 2022-08-03T19:16:18Z | 2022-08-03T19:16:18Z | 2022-08-03T19:29:13Z |
DOC/TST: Clarify Series.str.get supports passing hashable label | diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index d50daad9a22b1..e7a564c8f3f90 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -996,15 +996,15 @@ def rpartition(self, sep=" ", expand=True):
def get(self, i):
"""
- Extract element from each component at specified position.
+ Extract element from each component at specified position or with specified key.
- Extract element from lists, tuples, or strings in each element in the
+ Extract element from lists, tuples, dict, or strings in each element in the
Series/Index.
Parameters
----------
- i : int
- Position of element to extract.
+ i : int or hashable dict label
+ Position or key of element to extract.
Returns
-------
@@ -1044,6 +1044,15 @@ def get(self, i):
4 NaN
5 None
dtype: object
+
+ Return element with given key
+
+ >>> s = pd.Series([{"name": "Hello", "value": "World"},
+ ... {"name": "Goodbye", "value": "Planet"}])
+ >>> s.str.get('name')
+ 0 Hello
+ 1 Goodbye
+ dtype: object
"""
result = self._data.array._str_get(i)
return self._wrap_result(result)
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index 0e55676699c21..ffa8b557d2379 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -828,3 +828,20 @@ def test_zfill_with_leading_sign():
value = Series(["-cat", "-1", "+dog"])
expected = Series(["-0cat", "-0001", "+0dog"])
tm.assert_series_equal(value.str.zfill(5), expected)
+
+
+def test_get_with_dict_label():
+ # GH47911
+ s = Series(
+ [
+ {"name": "Hello", "value": "World"},
+ {"name": "Goodbye", "value": "Planet"},
+ {"value": "Sea"},
+ ]
+ )
+ result = s.str.get("name")
+ expected = Series(["Hello", "Goodbye", None])
+ tm.assert_series_equal(result, expected)
+ result = s.str.get("value")
+ expected = Series(["World", "Planet", "Sea"])
+ tm.assert_series_equal(result, expected)
| - [ ] closes #47911 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47918 | 2022-08-01T20:28:00Z | 2022-08-15T17:08:20Z | 2022-08-15T17:08:20Z | 2022-08-15T17:58:00Z |
PERF: MultiIndex unpickle | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 26b833f78bec6..bdf32c5296cc6 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -259,6 +259,10 @@ def _new_Index(cls, d):
# GH#23752 "labels" kwarg has been replaced with "codes"
d["codes"] = d.pop("labels")
+ # Since this was a valid MultiIndex at pickle-time, we don't need to
+ # check validty at un-pickle time.
+ d["verify_integrity"] = False
+
elif "dtype" not in d and "data" in d:
# Prevent Index.__new__ from conducting inference;
# "data" key not in RangeIndex
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47916 | 2022-08-01T19:05:55Z | 2022-08-01T21:24:19Z | 2022-08-01T21:24:19Z | 2022-08-01T21:27:28Z |
TYP: pandas.core.generic from pandas-stubs | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index d138ebb9c02a3..3f2d37e6b52a9 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -774,6 +774,7 @@ Other Deprecations
- Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
- Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`)
- Deprecated arguments ``*args`` and ``**kwargs`` in :class:`Rolling`, :class:`Expanding`, and :class:`ExponentialMovingWindow` ops. (:issue:`47836`)
+- Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.performance:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a033b7a3f83d7..7352ad2a4985d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -17,6 +17,7 @@
Hashable,
Literal,
Mapping,
+ NoReturn,
Sequence,
Type,
cast,
@@ -41,11 +42,14 @@
AnyArrayLike,
ArrayLike,
Axis,
+ ColspaceArgType,
CompressionOptions,
Dtype,
DtypeArg,
DtypeObj,
FilePath,
+ FloatFormatType,
+ FormattersType,
IgnoreRaise,
IndexKeyFunc,
IndexLabel,
@@ -183,6 +187,9 @@
from pandas.core.resample import Resampler
from pandas.core.series import Series
+ from pandas.io.pytables import HDFStore
+
+
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = {**_shared_docs}
@@ -765,7 +772,9 @@ def _set_axis(self, axis: int, labels: AnyArrayLike | Sequence) -> None:
self._clear_item_cache()
@final
- def swapaxes(self: NDFrameT, axis1, axis2, copy=True) -> NDFrameT:
+ def swapaxes(
+ self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t = True
+ ) -> NDFrameT:
"""
Interchange axes and swap values axes appropriately.
@@ -795,7 +804,7 @@ def swapaxes(self: NDFrameT, axis1, axis2, copy=True) -> NDFrameT:
@final
@doc(klass=_shared_doc_kwargs["klass"])
- def droplevel(self: NDFrameT, level, axis=0) -> NDFrameT:
+ def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT:
"""
Return {klass} with requested index / column level(s) removed.
@@ -1458,7 +1467,7 @@ def __invert__(self: NDFrameT) -> NDFrameT:
return self._constructor(new_data).__finalize__(self, method="__invert__")
@final
- def __nonzero__(self):
+ def __nonzero__(self) -> NoReturn:
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
@@ -1998,7 +2007,7 @@ def empty(self) -> bool_t:
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
- __array_priority__ = 1000
+ __array_priority__: int = 1000
def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
@@ -2134,6 +2143,8 @@ def _repr_data_resource_(self):
# I/O Methods
@final
+ @deprecate_kwarg(old_arg_name="verbose", new_arg_name=None)
+ @deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
@doc(
klass="object",
storage_options=_shared_docs["storage_options"],
@@ -2145,18 +2156,18 @@ def to_excel(
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: str | None = None,
- columns=None,
- header=True,
- index=True,
- index_label=None,
- startrow=0,
- startcol=0,
- engine=None,
- merge_cells=True,
- encoding=None,
- inf_rep="inf",
- verbose=True,
- freeze_panes=None,
+ columns: Sequence[Hashable] | None = None,
+ header: Sequence[Hashable] | bool_t = True,
+ index: bool_t = True,
+ index_label: IndexLabel = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ engine: str | None = None,
+ merge_cells: bool_t = True,
+ encoding: lib.NoDefault = lib.no_default,
+ inf_rep: str = "inf",
+ verbose: lib.NoDefault = lib.no_default,
+ freeze_panes: tuple[int, int] | None = None,
storage_options: StorageOptions = None,
) -> None:
"""
@@ -2214,11 +2225,21 @@ def to_excel(
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
+
+ .. deprecated:: 1.5.0
+
+ This keyword was not used.
+
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
+
+ .. deprecated:: 1.5.0
+
+ This keyword was not used.
+
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
@@ -2587,7 +2608,7 @@ def to_json(
@final
def to_hdf(
self,
- path_or_buf,
+ path_or_buf: FilePath | HDFStore,
key: str,
mode: str = "a",
complevel: int | None = None,
@@ -2740,13 +2761,13 @@ def to_sql(
self,
name: str,
con,
- schema=None,
+ schema: str | None = None,
if_exists: str = "fail",
index: bool_t = True,
- index_label=None,
- chunksize=None,
+ index_label: IndexLabel = None,
+ chunksize: int | None = None,
dtype: DtypeArg | None = None,
- method=None,
+ method: str | None = None,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
@@ -3147,33 +3168,89 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
else:
return xarray.Dataset.from_dataframe(self)
+ @overload
+ def to_latex(
+ self,
+ buf: None = ...,
+ columns: Sequence[Hashable] | None = ...,
+ col_space: ColspaceArgType | None = ...,
+ header: bool_t | Sequence[str] = ...,
+ index: bool_t = ...,
+ na_rep: str = ...,
+ formatters: FormattersType | None = ...,
+ float_format: FloatFormatType | None = ...,
+ sparsify: bool_t | None = ...,
+ index_names: bool_t = ...,
+ bold_rows: bool_t = ...,
+ column_format: str | None = ...,
+ longtable: bool_t | None = ...,
+ escape: bool_t | None = ...,
+ encoding: str | None = ...,
+ decimal: str = ...,
+ multicolumn: bool_t | None = ...,
+ multicolumn_format: str | None = ...,
+ multirow: bool_t | None = ...,
+ caption: str | None = ...,
+ label: str | None = ...,
+ position: str | None = ...,
+ ) -> str:
+ ...
+
+ @overload
+ def to_latex(
+ self,
+ buf: FilePath | WriteBuffer[str],
+ columns: Sequence[Hashable] | None = ...,
+ col_space: ColspaceArgType | None = ...,
+ header: bool_t | Sequence[str] = ...,
+ index: bool_t = ...,
+ na_rep: str = ...,
+ formatters: FormattersType | None = ...,
+ float_format: FloatFormatType | None = ...,
+ sparsify: bool_t | None = ...,
+ index_names: bool_t = ...,
+ bold_rows: bool_t = ...,
+ column_format: str | None = ...,
+ longtable: bool_t | None = ...,
+ escape: bool_t | None = ...,
+ encoding: str | None = ...,
+ decimal: str = ...,
+ multicolumn: bool_t | None = ...,
+ multicolumn_format: str | None = ...,
+ multirow: bool_t | None = ...,
+ caption: str | None = ...,
+ label: str | None = ...,
+ position: str | None = ...,
+ ) -> None:
+ ...
+
@final
@doc(returns=fmt.return_docstring)
def to_latex(
self,
- buf=None,
- columns=None,
- col_space=None,
- header=True,
- index=True,
- na_rep="NaN",
- formatters=None,
- float_format=None,
- sparsify=None,
- index_names=True,
- bold_rows=False,
- column_format=None,
- longtable=None,
- escape=None,
- encoding=None,
- decimal=".",
- multicolumn=None,
- multicolumn_format=None,
- multirow=None,
- caption=None,
- label=None,
- position=None,
- ):
+ buf: FilePath | WriteBuffer[str] | None = None,
+ columns: Sequence[Hashable] | None = None,
+ col_space: ColspaceArgType | None = None,
+ header: bool_t | Sequence[str] = True,
+ index: bool_t = True,
+ na_rep: str = "NaN",
+ formatters: FormattersType | None = None,
+ float_format: FloatFormatType | None = None,
+ sparsify: bool_t | None = None,
+ index_names: bool_t = True,
+ bold_rows: bool_t = False,
+ column_format: str | None = None,
+ longtable: bool_t | None = None,
+ escape: bool_t | None = None,
+ encoding: str | None = None,
+ decimal: str = ".",
+ multicolumn: bool_t | None = None,
+ multicolumn_format: str | None = None,
+ multirow: bool_t | None = None,
+ caption: str | None = None,
+ label: str | None = None,
+ position: str | None = None,
+ ) -> str | None:
r"""
Render object to a LaTeX tabular, longtable, or nested table.
@@ -3771,7 +3848,11 @@ def _take_with_is_copy(self: NDFrameT, indices, axis=0) -> NDFrameT:
@final
def xs(
- self: NDFrameT, key, axis=0, level=None, drop_level: bool_t = True
+ self: NDFrameT,
+ key: IndexLabel,
+ axis: Axis = 0,
+ level: IndexLabel = None,
+ drop_level: bool_t = True,
) -> NDFrameT:
"""
Return cross-section from the Series/DataFrame.
@@ -6822,7 +6903,7 @@ def replace(
*,
inplace: Literal[False] = ...,
limit: int | None = ...,
- regex=...,
+ regex: bool_t = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> NDFrameT:
...
@@ -6835,7 +6916,7 @@ def replace(
*,
inplace: Literal[True],
limit: int | None = ...,
- regex=...,
+ regex: bool_t = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> None:
...
@@ -6848,7 +6929,7 @@ def replace(
*,
inplace: bool_t = ...,
limit: int | None = ...,
- regex=...,
+ regex: bool_t = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> NDFrameT | None:
...
@@ -6868,7 +6949,7 @@ def replace(
value=lib.no_default,
inplace: bool_t = False,
limit: int | None = None,
- regex=False,
+ regex: bool_t = False,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
if not (
@@ -9521,7 +9602,7 @@ def where(
axis=...,
level=...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT:
...
@@ -9535,7 +9616,7 @@ def where(
axis=...,
level=...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool_t | lib.NoDefault = ...,
) -> None:
...
@@ -9549,7 +9630,7 @@ def where(
axis=...,
level=...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT | None:
...
@@ -9572,7 +9653,7 @@ def where(
axis=None,
level=None,
errors: IgnoreRaise | lib.NoDefault = "raise",
- try_cast=lib.no_default,
+ try_cast: bool_t | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
"""
Replace values where the condition is {cond_rev}.
@@ -9742,7 +9823,7 @@ def mask(
axis=...,
level=...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT:
...
@@ -9756,7 +9837,7 @@ def mask(
axis=...,
level=...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool_t | lib.NoDefault = ...,
) -> None:
...
@@ -9770,7 +9851,7 @@ def mask(
axis=...,
level=...,
errors: IgnoreRaise | lib.NoDefault = ...,
- try_cast=...,
+ try_cast: bool_t | lib.NoDefault = ...,
) -> NDFrameT | None:
...
@@ -9794,7 +9875,7 @@ def mask(
axis=None,
level=None,
errors: IgnoreRaise | lib.NoDefault = "raise",
- try_cast=lib.no_default,
+ try_cast: bool_t | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -10456,7 +10537,7 @@ def describe(
percentiles=None,
include=None,
exclude=None,
- datetime_is_numeric=False,
+ datetime_is_numeric: bool_t = False,
) -> NDFrameT:
"""
Generate descriptive statistics.
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 4279a9707e692..d8ef709dc89ea 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -856,8 +856,8 @@ def write(
sheet_name: str = "Sheet1",
startrow: int = 0,
startcol: int = 0,
- freeze_panes=None,
- engine=None,
+ freeze_panes: tuple[int, int] | None = None,
+ engine: str | None = None,
storage_options: StorageOptions = None,
) -> None:
"""
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 086a60774ac4e..71fecba4340ac 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -28,6 +28,7 @@
from pandas._typing import (
DateTimeErrorChoices,
DtypeArg,
+ IndexLabel,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
@@ -603,7 +604,7 @@ def to_sql(
schema: str | None = None,
if_exists: str = "fail",
index: bool = True,
- index_label=None,
+ index_label: IndexLabel = None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
method: str | None = None,
| Some annotations from pandas-stubs and some from the underlying io functions. | https://api.github.com/repos/pandas-dev/pandas/pulls/47912 | 2022-08-01T01:40:28Z | 2022-08-01T18:22:20Z | 2022-08-01T18:22:20Z | 2022-09-10T01:38:51Z |
BUG: Fix read_xml raising syntax error when reading XML with Chinese tags | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 6bd7378e05404..89487bfde94a5 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -26,6 +26,7 @@ Bug fixes
~~~~~~~~~
- The :class:`errors.FutureWarning` raised when passing arguments (other than ``filepath_or_buffer``) as positional in :func:`read_csv` is now raised at the correct stacklevel (:issue:`47385`)
- Bug in :meth:`DataFrame.to_sql` when ``method`` was a ``callable`` that did not return an ``int`` and would raise a ``TypeError`` (:issue:`46891`)
+- Bug in :func:`read_xml` when reading XML files with Chinese character tags and would raise ``XMLSyntaxError`` (:issue:`47902`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 9b6eb31dafc07..d52482fe2ef5a 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -12,6 +12,7 @@
)
from pandas._typing import (
+ TYPE_CHECKING,
CompressionOptions,
ConvertersArg,
DtypeArg,
@@ -46,6 +47,14 @@
)
from pandas.io.parsers import TextParser
+if TYPE_CHECKING:
+ from xml.etree.ElementTree import Element
+
+ from lxml.etree import (
+ _Element,
+ _XSLTResultTree,
+ )
+
@doc(
storage_options=_shared_docs["storage_options"],
@@ -410,7 +419,7 @@ def _validate_names(self) -> None:
def _parse_doc(
self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
- ) -> bytes:
+ ) -> Element | _Element:
"""
Build tree from path_or_buffer.
@@ -427,10 +436,7 @@ class _EtreeFrameParser(_XMLFrameParser):
"""
def parse_data(self) -> list[dict[str, str | None]]:
- from xml.etree.ElementTree import (
- XML,
- iterparse,
- )
+ from xml.etree.ElementTree import iterparse
if self.stylesheet is not None:
raise ValueError(
@@ -438,7 +444,7 @@ def parse_data(self) -> list[dict[str, str | None]]:
)
if self.iterparse is None:
- self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
+ self.xml_doc = self._parse_doc(self.path_or_buffer)
self._validate_path()
elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
@@ -503,11 +509,10 @@ def _validate_names(self) -> None:
def _parse_doc(
self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
- ) -> bytes:
+ ) -> Element:
from xml.etree.ElementTree import (
XMLParser,
parse,
- tostring,
)
handle_data = get_data_from_filepath(
@@ -519,9 +524,9 @@ def _parse_doc(
with preprocess_data(handle_data) as xml_data:
curr_parser = XMLParser(encoding=self.encoding)
- r = parse(xml_data, parser=curr_parser)
+ doc = parse(xml_data, parser=curr_parser)
- return tostring(r.getroot())
+ return doc.getroot()
class _LxmlFrameParser(_XMLFrameParser):
@@ -539,17 +544,14 @@ def parse_data(self) -> list[dict[str, str | None]]:
validate xpath, names, optionally parse and run XSLT,
and parse original or transformed XML and return specific nodes.
"""
- from lxml.etree import (
- XML,
- iterparse,
- )
+ from lxml.etree import iterparse
if self.iterparse is None:
- self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
+ self.xml_doc = self._parse_doc(self.path_or_buffer)
if self.stylesheet:
- self.xsl_doc = XML(self._parse_doc(self.stylesheet))
- self.xml_doc = XML(self._transform_doc())
+ self.xsl_doc = self._parse_doc(self.stylesheet)
+ self.xml_doc = self._transform_doc()
self._validate_path()
elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
@@ -607,12 +609,11 @@ def _validate_names(self) -> None:
def _parse_doc(
self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
- ) -> bytes:
+ ) -> _Element:
from lxml.etree import (
XMLParser,
fromstring,
parse,
- tostring,
)
handle_data = get_data_from_filepath(
@@ -637,9 +638,9 @@ def _parse_doc(
else:
doc = parse(xml_data, parser=curr_parser)
- return tostring(doc)
+ return doc
- def _transform_doc(self) -> bytes:
+ def _transform_doc(self) -> _XSLTResultTree:
"""
Transform original tree using stylesheet.
@@ -652,7 +653,7 @@ def _transform_doc(self) -> bytes:
transformer = XSLT(self.xsl_doc)
new_doc = transformer(self.xml_doc)
- return bytes(new_doc)
+ return new_doc
def get_data_from_filepath(
diff --git a/pandas/tests/io/data/xml/doc_ch_utf.xml b/pandas/tests/io/data/xml/doc_ch_utf.xml
new file mode 100644
index 0000000000000..fde215b89646b
--- /dev/null
+++ b/pandas/tests/io/data/xml/doc_ch_utf.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE fragmentdoc [
+ <!ELEMENT qafragment (qa+)>
+ <!ELEMENT qa ( q, a )>
+ <!ELEMENT q ( #PCDATA | title | name)*>
+ <!ATTLIST q speaker CDATA #REQUIRED>
+ <!ELEMENT a ( #PCDATA | title | name)*>
+ <!ATTLIST a speaker CDATA #REQUIRED>
+ <!ELEMENT name (#PCDATA)>
+ <!ELEMENT title (#PCDATA)>
+ <!ENTITY C4-4F71 "Sorry, this is Big5 only">
+]>
+
+<qafragment>
+ <qa>
+ <問 speaker="Opponent">問 若箇是邪而言破邪 何者是正而道(Sorry, this is Big5 only)申正</問>
+ <答 speaker="吉藏">答 邪既無量 正亦多途 大略為言不出二種 謂有得與無得 有得是邪須破 無得是正須申
+ 故<title>大品經</title> <name>善吉</name> 致問 何等是菩薩道 何等非菩薩道
+ <name>佛</name>答云 有所得非菩薩道 無所得是菩薩道</答>
+ </qa>
+ <qa>
+ <問 speaker="Opponent">問 既破有得申無得 亦應但破性執申假名以不</問>
+ <a speaker="吉藏">答 性執是有得 假名是無得 今破有得申無得 即是破性執申假名也</a>
+ </qa>
+ <qa>
+ <問 speaker="Opponent">問 既破性申假 亦應但破有申無 若有無兩洗 亦應性假雙破耶</問>
+ <答 speaker="吉藏">答 不例 有無皆是性 所以須雙破 既分性假異 故有破不破</答>
+ </qa>
+</qafragment>
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index 410c5f6703dcd..fd4ba87bd302c 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -423,6 +423,40 @@ def test_file_buffered_reader_no_xml_declaration(datapath, parser, mode):
tm.assert_frame_equal(df_str, df_expected)
+def test_string_charset(parser):
+ txt = "<中文標籤><row><c1>1</c1><c2>2</c2></row></中文標籤>"
+
+ df_str = read_xml(txt, parser=parser)
+
+ df_expected = DataFrame({"c1": 1, "c2": 2}, index=[0])
+
+ tm.assert_frame_equal(df_str, df_expected)
+
+
+def test_file_charset(datapath, parser):
+ xml_file = datapath("io", "data", "xml", "doc_ch_utf.xml")
+
+ df_file = read_xml(datapath(xml_file), parser=parser)
+
+ df_expected = DataFrame(
+ {
+ "問": [
+ "問 若箇是邪而言破邪 何者是正而道(Sorry, this is Big5 only)申正",
+ "問 既破有得申無得 亦應但破性執申假名以不",
+ "問 既破性申假 亦應但破有申無 若有無兩洗 亦應性假雙破耶",
+ ],
+ "答": [
+ "答 邪既無量 正亦多途 大略為言不出二種 謂有得與無得 有得是邪須破 無得是正須申\n\t\t故",
+ None,
+ "答 不例 有無皆是性 所以須雙破 既分性假異 故有破不破",
+ ],
+ "a": [None, "答 性執是有得 假名是無得 今破有得申無得 即是破性執申假名也", None],
+ }
+ )
+
+ tm.assert_frame_equal(df_file, df_expected)
+
+
def test_file_handle_close(datapath, parser):
xml_file = datapath("io", "data", "xml", "books.xml")
@@ -1086,6 +1120,35 @@ def test_stylesheet_buffered_reader(datapath, mode):
tm.assert_frame_equal(df_kml, df_style)
+@td.skip_if_no("lxml")
+def test_style_charset():
+ xml = "<中文標籤><row><c1>1</c1><c2>2</c2></row></中文標籤>"
+
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+ <xsl:output omit-xml-declaration="yes" indent="yes"/>
+ <xsl:strip-space elements="*"/>
+
+ <xsl:template match="node()|@*">
+ <xsl:copy>
+ <xsl:apply-templates select="node()|@*"/>
+ </xsl:copy>
+ </xsl:template>
+
+ <xsl:template match="中文標籤">
+ <根>
+ <xsl:apply-templates />
+ </根>
+ </xsl:template>
+
+</xsl:stylesheet>"""
+
+ df_orig = read_xml(xml)
+ df_style = read_xml(xml, stylesheet=xsl)
+
+ tm.assert_frame_equal(df_orig, df_style)
+
+
@td.skip_if_no("lxml")
def test_not_stylesheet(datapath):
from lxml.etree import XSLTParseError
| - [X] closes #47902 (Replace xxxx with the Github issue number)
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/v1.4.4.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47905 | 2022-07-31T00:33:01Z | 2022-08-01T18:28:14Z | 2022-08-01T18:28:14Z | 2022-08-08T09:52:36Z |
ENH: Move warnings to error/__init__.py per GH27656 | diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst
index 338dd87aa8c62..1144c767942d4 100644
--- a/doc/source/reference/testing.rst
+++ b/doc/source/reference/testing.rst
@@ -27,6 +27,7 @@ Exceptions and warnings
errors.AbstractMethodError
errors.AccessorRegistrationWarning
errors.AttributeConflictWarning
+ errors.CategoricalConversionWarning
errors.ClosedFileError
errors.CSSWarning
errors.DatabaseError
@@ -36,6 +37,7 @@ Exceptions and warnings
errors.EmptyDataError
errors.IncompatibilityWarning
errors.IndexingError
+ errors.InvalidColumnName
errors.InvalidIndexError
errors.IntCastingNaNError
errors.MergeError
@@ -49,6 +51,7 @@ Exceptions and warnings
errors.ParserWarning
errors.PerformanceWarning
errors.PossibleDataLossError
+ errors.PossiblePrecisionLoss
errors.PyperclipException
errors.PyperclipWindowsException
errors.SettingWithCopyError
@@ -57,6 +60,7 @@ Exceptions and warnings
errors.UndefinedVariableError
errors.UnsortedIndexError
errors.UnsupportedFunctionCall
+ errors.ValueLabelTypeMismatch
Bug report function
-------------------
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 578431e9ab2d8..3788cb9867dbb 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -275,7 +275,7 @@ Other enhancements
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
-- :class:`.DataError`, :class:`.SpecificationError`, :class:`.SettingWithCopyError`, :class:`.SettingWithCopyWarning`, :class:`.NumExprClobberingError`, :class:`.UndefinedVariableError`, and :class:`.IndexingError` are now exposed in ``pandas.errors`` (:issue:`27656`)
+- :class:`.DataError`, :class:`.SpecificationError`, :class:`.SettingWithCopyError`, :class:`.SettingWithCopyWarning`, :class:`.NumExprClobberingError`, :class:`.UndefinedVariableError`, :class:`.IndexingError`, :class:`.PyperclipException`, :class:`.PyperclipWindowsException`, :class:`.CSSWarning`, :class:`.PossibleDataLossError`, :class:`.ClosedFileError`, :class:`.IncompatibilityWarning`, :class:`.AttributeConflictWarning`, :class:`.DatabaseError, :class:`.PossiblePrecisionLoss, :class:`.ValueLabelTypeMismatch, :class:`.InvalidColumnName, and :class:`.CategoricalConversionWarning` are now exposed in ``pandas.errors`` (:issue:`27656`)
- Added ``check_like`` argument to :func:`testing.assert_series_equal` (:issue:`47247`)
- Allow reading compressed SAS files with :func:`read_sas` (e.g., ``.sas7bdat.gz`` files)
- :meth:`DatetimeIndex.astype` now supports casting timezone-naive indexes to ``datetime64[s]``, ``datetime64[ms]``, and ``datetime64[us]``, and timezone-aware indexes to the corresponding ``datetime64[unit, tzname]`` dtypes (:issue:`47579`)
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index d7f5e7aab58ab..d0c9ef94f4453 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -479,10 +479,67 @@ class DatabaseError(OSError):
"""
+class PossiblePrecisionLoss(Warning):
+ """
+ Warning raised by to_stata on a column with a value outside or equal to int64.
+
+ When the column value is outside or equal to the int64 value the column is
+ converted to a float64 dtype.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)})
+ >>> df.to_stata('test') # doctest: +SKIP
+ ... # PossiblePrecisionLoss: Column converted from int64 to float64...
+ """
+
+
+class ValueLabelTypeMismatch(Warning):
+ """
+ Warning raised by to_stata on a category column that contains non-string values.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")})
+ >>> df.to_stata('test') # doctest: +SKIP
+ ... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str...
+ """
+
+
+class InvalidColumnName(Warning):
+ """
+ Warning raised by to_stata the column contains a non-valid stata name.
+
+ Because the column name is an invalid Stata variable, the name needs to be
+ converted.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})
+ >>> df.to_stata('test') # doctest: +SKIP
+ ... # InvalidColumnName: Not all pandas column names were valid Stata variable...
+ """
+
+
+class CategoricalConversionWarning(Warning):
+ """
+ Warning is raised when reading a partial labeled Stata file using a iterator.
+
+ Examples
+ --------
+ >>> from pandas.io.stata import StataReader
+ >>> with StataReader('dta_file', chunksize=2) as reader: # doctest: +SKIP
+ ... for i, block in enumerate(reader):
+ ... print(i, block))
+ ... # CategoricalConversionWarning: One or more series with value labels...
+ """
+
+
__all__ = [
"AbstractMethodError",
"AccessorRegistrationWarning",
"AttributeConflictWarning",
+ "CategoricalConversionWarning",
"ClosedFileError",
"CSSWarning",
"DatabaseError",
@@ -492,6 +549,7 @@ class DatabaseError(OSError):
"EmptyDataError",
"IncompatibilityWarning",
"IntCastingNaNError",
+ "InvalidColumnName",
"InvalidIndexError",
"IndexingError",
"MergeError",
@@ -505,6 +563,7 @@ class DatabaseError(OSError):
"ParserWarning",
"PerformanceWarning",
"PossibleDataLossError",
+ "PossiblePrecisionLoss",
"PyperclipException",
"PyperclipWindowsException",
"SettingWithCopyError",
@@ -513,4 +572,5 @@ class DatabaseError(OSError):
"UndefinedVariableError",
"UnsortedIndexError",
"UnsupportedFunctionCall",
+ "ValueLabelTypeMismatch",
]
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 3daa6d837349e..8305a4f6adf84 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -41,6 +41,12 @@
StorageOptions,
WriteBuffer,
)
+from pandas.errors import (
+ CategoricalConversionWarning,
+ InvalidColumnName,
+ PossiblePrecisionLoss,
+ ValueLabelTypeMismatch,
+)
from pandas.util._decorators import (
Appender,
doc,
@@ -493,20 +499,12 @@ def g(x: datetime.datetime) -> int:
"""
-class PossiblePrecisionLoss(Warning):
- pass
-
-
precision_loss_doc: Final = """
Column converted from {0} to {1}, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
-class ValueLabelTypeMismatch(Warning):
- pass
-
-
value_label_mismatch_doc: Final = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
@@ -514,10 +512,6 @@ class ValueLabelTypeMismatch(Warning):
"""
-class InvalidColumnName(Warning):
- pass
-
-
invalid_name_doc: Final = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
@@ -530,11 +524,7 @@ class InvalidColumnName(Warning):
"""
-class CategoricalConversionWarning(Warning):
- pass
-
-
-categorical_conversion_warning = """
+categorical_conversion_warning: Final = """
One or more series with value labels are not fully labeled. Reading this
dataset with an iterator results in categorical variable with different
categories. This occurs since it is not possible to know all possible values
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index 187d5399f5985..c6ca51b7763d9 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -35,6 +35,10 @@
"IncompatibilityWarning",
"AttributeConflictWarning",
"DatabaseError",
+ "PossiblePrecisionLoss",
+ "CategoricalConversionWarning",
+ "InvalidColumnName",
+ "ValueLabelTypeMismatch",
],
)
def test_exception_importable(exc):
| - [x] xref #27656. this GitHub issue is being done in multiple parts
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
I noticed one of the strings didn't have a `Final` so I've added it.
Besides the tslibs and OptionError errors, I think I've moved all the exceptions/warnings to the __errors.py. Is there anything else I should do? | https://api.github.com/repos/pandas-dev/pandas/pulls/47901 | 2022-07-29T20:46:17Z | 2022-08-08T18:49:40Z | 2022-08-08T18:49:40Z | 2022-08-08T18:49:51Z |
BUG: preserve _id in MultiIndex.copy(deep=False) | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index fd6b6ba63d7e0..b4b576df9918e 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1190,6 +1190,7 @@ def copy(
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
+ keep_id = not deep
if levels is not None:
warnings.warn(
"parameter levels is deprecated and will be removed in a future "
@@ -1197,6 +1198,7 @@ def copy(
FutureWarning,
stacklevel=find_stack_level(),
)
+ keep_id = False
if codes is not None:
warnings.warn(
"parameter codes is deprecated and will be removed in a future "
@@ -1204,6 +1206,7 @@ def copy(
FutureWarning,
stacklevel=find_stack_level(),
)
+ keep_id = False
if deep:
from copy import deepcopy
@@ -1225,6 +1228,8 @@ def copy(
)
new_index._cache = self._cache.copy()
new_index._cache.pop("levels", None) # GH32669
+ if keep_id:
+ new_index._id = self._id
if dtype:
warnings.warn(
diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py
index 9a0e4bc0996be..2b64845c919cf 100644
--- a/pandas/tests/indexes/multi/test_copy.py
+++ b/pandas/tests/indexes/multi/test_copy.py
@@ -104,3 +104,15 @@ def test_copy_deprecated_parameters(deep, param_name, param_value):
idx_copy = idx.copy(deep=deep, **{param_name: param_value})
assert [list(i) for i in getattr(idx_copy, param_name)] == param_value
+
+
+def test_copy_deep_false_retains_id():
+ # GH#47878
+ idx = MultiIndex(
+ levels=[["foo", "bar"], ["fizz", "buzz"]],
+ codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
+ names=["first", "second"],
+ )
+
+ res = idx.copy(deep=False)
+ assert res._id is idx._id
| - [x] closes #47878 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47900 | 2022-07-29T20:33:56Z | 2022-07-29T23:42:28Z | 2022-07-29T23:42:28Z | 2022-07-30T00:49:14Z |
Revert "CI: Pin cython on 32bit build" | diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
index 8c74a53feed9b..e091160c952f8 100644
--- a/.github/workflows/32-bit-linux.yml
+++ b/.github/workflows/32-bit-linux.yml
@@ -38,7 +38,7 @@ jobs:
/opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
. ~/virtualenvs/pandas-dev/bin/activate && \
python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
- pip install cython==0.29.30 numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
+ pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
python setup.py build_ext -q -j2 && \
python -m pip install --no-build-isolation --no-use-pep517 -e . && \
export PANDAS_CI=1 && \
diff --git a/ci/deps/actions-310-numpydev.yaml b/ci/deps/actions-310-numpydev.yaml
index c2cdd38e8599f..ef20c2aa889b9 100644
--- a/ci/deps/actions-310-numpydev.yaml
+++ b/ci/deps/actions-310-numpydev.yaml
@@ -16,8 +16,7 @@ dependencies:
- pytz
- pip
- pip:
- #- cython # TODO: don't install from master after Cython 3.0.0a11 is released
- - "git+https://github.com/cython/cython.git@master"
+ - "cython"
- "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple"
- "--pre"
- "numpy"
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 73700c0da0d47..65918005ad6f1 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.10
# test dependencies
- - cython=0.29.30
+ - cython>=0.29.30
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index c11e5e5549dcb..f89d4a743a6f1 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython=0.29.30
+ - cython>=0.29.30
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index 3ab27830060b2..51ef85ab9c299 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -7,7 +7,7 @@ dependencies:
- python=3.8.0
# test dependencies
- - cython=0.29.30
+ - cython>=0.29.30
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index 71b03c2a81939..a4473f5911903 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython=0.29.30
+ - cython>=0.29.30
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 3cce3bda3915c..8605a9f4520d7 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.9
# test dependencies
- - cython=0.29.30
+ - cython>=0.29.30
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index 4f17c19d9f2a0..e76b3071bd8bb 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# test dependencies
- - cython=0.29.30
+ - cython>=0.29.30
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
| Reverts pandas-dev/pandas#47889
Let's see if cython 0.29.32 fixes this | https://api.github.com/repos/pandas-dev/pandas/pulls/47898 | 2022-07-29T14:54:51Z | 2022-08-01T23:09:11Z | 2022-08-01T23:09:11Z | 2022-08-02T08:42:58Z |
ENH: parse 8 or 9 digit delimited dates | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index d138ebb9c02a3..256f22cc41a5e 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -770,6 +770,7 @@ Other Deprecations
- Deprecated the argument ``na_sentinel`` in :func:`factorize`, :meth:`Index.factorize`, and :meth:`.ExtensionArray.factorize`; pass ``use_na_sentinel=True`` instead to use the sentinel ``-1`` for NaN values and ``use_na_sentinel=False`` instead of ``na_sentinel=None`` to encode NaN values (:issue:`46910`)
- Deprecated :meth:`DataFrameGroupBy.transform` not aligning the result when the UDF returned DataFrame (:issue:`45648`)
- Clarified warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument (:issue:`46210`)
+- Emit warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument even for dates where leading zero is omitted (e.g. ``31/1/2001``) (:issue:`47880`)
- Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
- Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
- Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`)
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 5cb11436f6f45..97a8f81094a8f 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -99,8 +99,14 @@ cdef:
int MAX_DAYS_IN_MONTH = 31, MAX_MONTH = 12
-cdef inline bint _is_not_delimiter(const char ch):
- return strchr(delimiters, ch) == NULL
+cdef inline bint _is_delimiter(const char ch):
+ return strchr(delimiters, ch) != NULL
+
+
+cdef inline int _parse_1digit(const char* s):
+ cdef int result = 0
+ result += getdigit_ascii(s[0], -10) * 1
+ return result
cdef inline int _parse_2digit(const char* s):
@@ -151,18 +157,37 @@ cdef inline object _parse_delimited_date(str date_string, bint dayfirst):
bint can_swap = 0
buf = get_c_string_buf_and_size(date_string, &length)
- if length == 10:
+ if length == 10 and _is_delimiter(buf[2]) and _is_delimiter(buf[5]):
# parsing MM?DD?YYYY and DD?MM?YYYY dates
- if _is_not_delimiter(buf[2]) or _is_not_delimiter(buf[5]):
- return None, None
month = _parse_2digit(buf)
day = _parse_2digit(buf + 3)
year = _parse_4digit(buf + 6)
reso = 'day'
can_swap = 1
- elif length == 7:
+ elif length == 9 and _is_delimiter(buf[1]) and _is_delimiter(buf[4]):
+ # parsing M?DD?YYYY and D?MM?YYYY dates
+ month = _parse_1digit(buf)
+ day = _parse_2digit(buf + 2)
+ year = _parse_4digit(buf + 5)
+ reso = 'day'
+ can_swap = 1
+ elif length == 9 and _is_delimiter(buf[2]) and _is_delimiter(buf[4]):
+ # parsing MM?D?YYYY and DD?M?YYYY dates
+ month = _parse_2digit(buf)
+ day = _parse_1digit(buf + 3)
+ year = _parse_4digit(buf + 5)
+ reso = 'day'
+ can_swap = 1
+ elif length == 8 and _is_delimiter(buf[1]) and _is_delimiter(buf[3]):
+ # parsing M?D?YYYY and D?M?YYYY dates
+ month = _parse_1digit(buf)
+ day = _parse_1digit(buf + 2)
+ year = _parse_4digit(buf + 4)
+ reso = 'day'
+ can_swap = 1
+ elif length == 7 and _is_delimiter(buf[2]):
# parsing MM?YYYY dates
- if buf[2] == b'.' or _is_not_delimiter(buf[2]):
+ if buf[2] == b'.':
# we cannot reliably tell whether e.g. 10.2010 is a float
# or a date, thus we refuse to parse it here
return None, None
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index d05961b702c51..5d2e5bccd9762 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -1948,6 +1948,39 @@ def test_dayfirst_warnings():
tm.assert_index_equal(expected, res8)
+@pytest.mark.parametrize(
+ "date_string, dayfirst",
+ [
+ pytest.param(
+ "31/1/2014",
+ False,
+ id="second date is single-digit",
+ ),
+ pytest.param(
+ "1/31/2014",
+ True,
+ id="first date is single-digit",
+ ),
+ ],
+)
+def test_dayfirst_warnings_no_leading_zero(date_string, dayfirst):
+ # GH47880
+ initial_value = f"date\n{date_string}"
+ expected = DatetimeIndex(
+ ["2014-01-31"], dtype="datetime64[ns]", freq=None, name="date"
+ )
+ with tm.assert_produces_warning(
+ UserWarning, match=r"may lead to inconsistently parsed dates"
+ ):
+ res = read_csv(
+ StringIO(initial_value),
+ parse_dates=["date"],
+ index_col="date",
+ dayfirst=dayfirst,
+ ).index
+ tm.assert_index_equal(expected, res)
+
+
@skip_pyarrow
def test_infer_first_column_as_index(all_parsers):
# GH#11019
| - [x] closes #47880 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47894 | 2022-07-29T11:23:56Z | 2022-08-01T18:30:56Z | 2022-08-01T18:30:56Z | 2022-08-01T18:31:02Z |
CLN: Rename "add" to "sum" in groupby | diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi
index c7cb9705d7cb9..dfae1bff91ac8 100644
--- a/pandas/_libs/groupby.pyi
+++ b/pandas/_libs/groupby.pyi
@@ -50,7 +50,7 @@ def group_any_all(
val_test: Literal["any", "all"],
skipna: bool,
) -> None: ...
-def group_add(
+def group_sum(
out: np.ndarray, # complexfloating_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[complexfloating_t, ndim=2]
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index db785bd962f96..06830a1d84c6e 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -124,7 +124,7 @@ def group_median_float64(
ndarray[intp_t] indexer
float64_t* ptr
- assert min_count == -1, "'min_count' only used in add and prod"
+ assert min_count == -1, "'min_count' only used in sum and prod"
ngroups = len(counts)
N, K = (<object>values).shape
@@ -502,7 +502,7 @@ def group_any_all(
# ----------------------------------------------------------------------
-# group_add, group_prod, group_var, group_mean, group_ohlc
+# group_sum, group_prod, group_var, group_mean, group_ohlc
# ----------------------------------------------------------------------
ctypedef fused mean_t:
@@ -511,17 +511,17 @@ ctypedef fused mean_t:
complex64_t
complex128_t
-ctypedef fused add_t:
+ctypedef fused sum_t:
mean_t
object
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_add(
- add_t[:, ::1] out,
+def group_sum(
+ sum_t[:, ::1] out,
int64_t[::1] counts,
- ndarray[add_t, ndim=2] values,
+ ndarray[sum_t, ndim=2] values,
const intp_t[::1] labels,
Py_ssize_t min_count=0,
bint is_datetimelike=False,
@@ -531,8 +531,8 @@ def group_add(
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- add_t val, t, y
- add_t[:, ::1] sumx, compensation
+ sum_t val, t, y
+ sum_t[:, ::1] sumx, compensation
int64_t[:, ::1] nobs
Py_ssize_t len_values = len(values), len_labels = len(labels)
@@ -546,7 +546,7 @@ def group_add(
N, K = (<object>values).shape
- if add_t is object:
+ if sum_t is object:
# NB: this does not use 'compensation' like the non-object track does.
for i in range(N):
lab = labels[i]
@@ -588,10 +588,10 @@ def group_add(
# not nan
# With dt64/td64 values, values have been cast to float64
- # instead if int64 for group_add, but the logic
+ # instead if int64 for group_sum, but the logic
# is otherwise the same as in _treat_as_na
if val == val and not (
- add_t is float64_t
+ sum_t is float64_t
and is_datetimelike
and val == <float64_t>NPY_NAT
):
@@ -677,7 +677,7 @@ def group_var(
int64_t[:, ::1] nobs
Py_ssize_t len_values = len(values), len_labels = len(labels)
- assert min_count == -1, "'min_count' only used in add and prod"
+ assert min_count == -1, "'min_count' only used in sum and prod"
if len_values != len_labels:
raise ValueError("len(index) != len(labels)")
@@ -745,7 +745,7 @@ def group_mean(
Array containing unique label for each group, with its
ordering matching up to the corresponding record in `values`.
min_count : Py_ssize_t
- Only used in add and prod. Always -1.
+ Only used in sum and prod. Always -1.
is_datetimelike : bool
True if `values` contains datetime-like entries.
mask : ndarray[bool, ndim=2], optional
@@ -766,7 +766,7 @@ def group_mean(
int64_t[:, ::1] nobs
Py_ssize_t len_values = len(values), len_labels = len(labels)
- assert min_count == -1, "'min_count' only used in add and prod"
+ assert min_count == -1, "'min_count' only used in sum and prod"
if len_values != len_labels:
raise ValueError("len(index) != len(labels)")
@@ -821,7 +821,7 @@ def group_ohlc(
Py_ssize_t i, j, N, K, lab
floating val
- assert min_count == -1, "'min_count' only used in add and prod"
+ assert min_count == -1, "'min_count' only used in sum and prod"
if len(labels) == 0:
return
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 9b4991d32692b..06422f8cc5cb0 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1338,7 +1338,6 @@ def _resolve_numeric_only(
if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype):
# GH#47500
- how = "sum" if how == "add" else how
warnings.warn(
f"{type(self).__name__}.{how} called with "
f"numeric_only={numeric_only} and dtype {self.obj.dtype}. This will "
@@ -1738,9 +1737,8 @@ def _cython_agg_general(
kwd_name = "numeric_only"
if how in ["any", "all"]:
kwd_name = "bool_only"
- kernel = "sum" if how == "add" else how
raise NotImplementedError(
- f"{type(self).__name__}.{kernel} does not implement {kwd_name}."
+ f"{type(self).__name__}.{how} does not implement {kwd_name}."
)
elif not is_ser:
data = data.get_numeric_data(copy=False)
@@ -2417,7 +2415,7 @@ def sum(
result = self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
- alias="add",
+ alias="sum",
npfunc=np.sum,
)
@@ -4341,8 +4339,6 @@ def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiInde
def warn_dropping_nuisance_columns_deprecated(cls, how: str, numeric_only) -> None:
- if how == "add":
- how = "sum"
if numeric_only is not lib.no_default and not numeric_only:
# numeric_only was specified and falsey but still dropped nuisance columns
warnings.warn(
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 6dc4ccfa8e1ee..283e4a48657c5 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -121,7 +121,7 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
_CYTHON_FUNCTIONS = {
"aggregate": {
- "add": "group_add",
+ "sum": "group_sum",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
@@ -213,7 +213,7 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
values = ensure_float64(values)
elif values.dtype.kind in ["i", "u"]:
- if how in ["add", "var", "prod", "mean", "ohlc"] or (
+ if how in ["sum", "var", "prod", "mean", "ohlc"] or (
self.kind == "transform" and self.has_dropped_na
):
# result may still include NaN, so we have to cast
@@ -241,7 +241,7 @@ def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
if isinstance(dtype, CategoricalDtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
- if how in ["add", "prod", "cumsum", "cumprod"]:
+ if how in ["sum", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
elif how not in ["rank"]:
# only "rank" is implemented in cython
@@ -258,7 +258,7 @@ def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
# TODO: same for period_dtype? no for these methods with Period
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
- if how in ["add", "prod", "cumsum", "cumprod"]:
+ if how in ["sum", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
@@ -311,7 +311,7 @@ def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
"""
how = self.how
- if how in ["add", "cumsum", "sum", "prod"]:
+ if how in ["sum", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif how in ["mean", "median", "var"]:
@@ -567,7 +567,7 @@ def _call_cython_op(
result_mask=result_mask,
is_datetimelike=is_datetimelike,
)
- elif self.how in ["add"]:
+ elif self.how in ["sum"]:
# We support datetimelike
func(
out=result,
@@ -625,7 +625,7 @@ def _call_cython_op(
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
# Casting only needed for float16, bool, datetimelike,
- # and self.how in ["add", "prod", "ohlc", "cumprod"]
+ # and self.how in ["sum", "prod", "ohlc", "cumprod"]
res_dtype = self._get_result_dtype(orig_values.dtype)
op_result = maybe_downcast_to_dtype(result, res_dtype)
else:
diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py
index 869ed31b6a2d9..6c5a3ae67c78a 100644
--- a/pandas/tests/groupby/aggregate/test_cython.py
+++ b/pandas/tests/groupby/aggregate/test_cython.py
@@ -166,7 +166,7 @@ def test_cython_fail_agg():
("mean", np.mean),
("median", np.median),
("var", np.var),
- ("add", np.sum),
+ ("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
@@ -214,7 +214,7 @@ def test_cython_agg_empty_buckets_nanops(observed):
grps = range(0, 25, 5)
# add / sum
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
- "add", alt=None, numeric_only=True
+ "sum", alt=None, numeric_only=True
)
intervals = pd.interval_range(0, 20, freq=5, inclusive="right")
expected = DataFrame(
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index fbc3b385e5098..970d4f155ecfc 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -61,7 +61,7 @@ def test_custom_grouper(index):
# check all cython functions work
g.ohlc() # doesn't use _cython_agg_general
- funcs = ["add", "mean", "prod", "min", "max", "var"]
+ funcs = ["sum", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f, alt=None, numeric_only=True)
@@ -69,7 +69,7 @@ def test_custom_grouper(index):
g = s.groupby(b)
# check all cython functions work
g.ohlc() # doesn't use _cython_agg_general
- funcs = ["add", "mean", "prod", "min", "max", "var"]
+ funcs = ["sum", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f, alt=None, numeric_only=True)
@@ -414,7 +414,7 @@ def test_resample_upsampling_picked_but_not_correct():
tm.assert_series_equal(result2, expected)
-@pytest.mark.parametrize("f", ["add", "mean", "prod", "min", "max", "var"])
+@pytest.mark.parametrize("f", ["sum", "mean", "prod", "min", "max", "var"])
def test_resample_frame_basic_cy_funcs(f):
df = tm.makeTimeDataFrame()
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
sum is the one method whose name doesn't agree with the corresponding `how` | https://api.github.com/repos/pandas-dev/pandas/pulls/47892 | 2022-07-29T01:23:26Z | 2022-07-29T20:56:43Z | 2022-07-29T20:56:43Z | 2022-07-29T20:57:40Z |
CI: Add CodeQL Github Action | diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000000000..457aa69fb924f
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,31 @@
+name: CodeQL
+on:
+ schedule:
+ # every day at midnight
+ - cron: "0 0 * * *"
+
+concurrency:
+ group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }}
+ cancel-in-progress: true
+
+jobs:
+ analyze:
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language:
+ - python
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ - uses: github/codeql-action/autobuild@v2
+ - uses: github/codeql-action/analyze@v2
| Similar to Ibis' configuration: https://github.com/ibis-project/ibis/blob/master/.github/workflows/codeql-analysis.yml
| https://api.github.com/repos/pandas-dev/pandas/pulls/47890 | 2022-07-28T23:33:39Z | 2022-07-30T00:46:00Z | 2022-07-30T00:46:00Z | 2022-07-30T17:17:29Z |
CI: Pin cython on 32bit build | diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
index e091160c952f8..8c74a53feed9b 100644
--- a/.github/workflows/32-bit-linux.yml
+++ b/.github/workflows/32-bit-linux.yml
@@ -38,7 +38,7 @@ jobs:
/opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
. ~/virtualenvs/pandas-dev/bin/activate && \
python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
- pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
+ pip install cython==0.29.30 numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
python setup.py build_ext -q -j2 && \
python -m pip install --no-build-isolation --no-use-pep517 -e . && \
export PANDAS_CI=1 && \
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47889 | 2022-07-28T22:38:00Z | 2022-07-28T23:37:27Z | 2022-07-28T23:37:27Z | 2022-07-29T00:57:16Z |
REF: Rename exchange -> interchange | diff --git a/doc/source/reference/general_functions.rst b/doc/source/reference/general_functions.rst
index f82d9c9a6482c..474e37a85d857 100644
--- a/doc/source/reference/general_functions.rst
+++ b/doc/source/reference/general_functions.rst
@@ -85,4 +85,4 @@ Importing from other DataFrame libraries
.. autosummary::
:toctree: api/
- api.exchange.from_dataframe
+ api.interchange.from_dataframe
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index 22a09ed61d694..9d4f721225d93 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,13 +1,13 @@
""" public toolkit API """
from pandas.api import (
- exchange,
extensions,
indexers,
+ interchange,
types,
)
__all__ = [
- "exchange",
+ "interchange",
"extensions",
"indexers",
"types",
diff --git a/pandas/api/exchange/__init__.py b/pandas/api/exchange/__init__.py
deleted file mode 100644
index 6760d81f60ac7..0000000000000
--- a/pandas/api/exchange/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
-Public API for DataFrame exchange protocol.
-"""
-
-from pandas.core.exchange.dataframe_protocol import DataFrame
-from pandas.core.exchange.from_dataframe import from_dataframe
-
-__all__ = ["from_dataframe", "DataFrame"]
diff --git a/pandas/api/interchange/__init__.py b/pandas/api/interchange/__init__.py
new file mode 100644
index 0000000000000..2f3a73bc46b31
--- /dev/null
+++ b/pandas/api/interchange/__init__.py
@@ -0,0 +1,8 @@
+"""
+Public API for DataFrame interchange protocol.
+"""
+
+from pandas.core.interchange.dataframe_protocol import DataFrame
+from pandas.core.interchange.from_dataframe import from_dataframe
+
+__all__ = ["from_dataframe", "DataFrame"]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a541cbfe502fb..752d7283329e8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -220,8 +220,8 @@
if TYPE_CHECKING:
- from pandas.core.exchange.dataframe_protocol import DataFrame as DataFrameXchg
from pandas.core.groupby.generic import DataFrameGroupBy
+ from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg
from pandas.core.internals import SingleDataManager
from pandas.core.resample import Resampler
@@ -819,7 +819,7 @@ def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
) -> DataFrameXchg:
"""
- Return the dataframe exchange object implementing the exchange protocol.
+ Return the dataframe interchange object implementing the interchange protocol.
Parameters
----------
@@ -832,19 +832,19 @@ def __dataframe__(
Returns
-------
- DataFrame exchange object
+ DataFrame interchange object
The object which consuming library can use to ingress the dataframe.
Notes
-----
- Details on the exchange protocol:
+ Details on the interchange protocol:
https://data-apis.org/dataframe-protocol/latest/index.html
`nan_as_null` currently has no effect; once support for nullable extension
dtypes is added, this value should be propagated to columns.
"""
- from pandas.core.exchange.dataframe import PandasDataFrameXchg
+ from pandas.core.interchange.dataframe import PandasDataFrameXchg
return PandasDataFrameXchg(self, nan_as_null, allow_copy)
diff --git a/pandas/core/exchange/__init__.py b/pandas/core/interchange/__init__.py
similarity index 100%
rename from pandas/core/exchange/__init__.py
rename to pandas/core/interchange/__init__.py
diff --git a/pandas/core/exchange/buffer.py b/pandas/core/interchange/buffer.py
similarity index 97%
rename from pandas/core/exchange/buffer.py
rename to pandas/core/interchange/buffer.py
index a3b05a0c5d24a..1d24efc263ca0 100644
--- a/pandas/core/exchange/buffer.py
+++ b/pandas/core/interchange/buffer.py
@@ -3,7 +3,7 @@
import numpy as np
from packaging import version
-from pandas.core.exchange.dataframe_protocol import (
+from pandas.core.interchange.dataframe_protocol import (
Buffer,
DlpackDeviceType,
)
diff --git a/pandas/core/exchange/column.py b/pandas/core/interchange/column.py
similarity index 98%
rename from pandas/core/exchange/column.py
rename to pandas/core/interchange/column.py
index c2a1cfe766b22..9ef73aa1f40e0 100644
--- a/pandas/core/exchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -12,14 +12,14 @@
is_categorical_dtype,
is_string_dtype,
)
-from pandas.core.exchange.buffer import PandasBuffer
-from pandas.core.exchange.dataframe_protocol import (
+from pandas.core.interchange.buffer import PandasBuffer
+from pandas.core.interchange.dataframe_protocol import (
Column,
ColumnBuffers,
ColumnNullType,
DtypeKind,
)
-from pandas.core.exchange.utils import (
+from pandas.core.interchange.utils import (
ArrowCTypes,
Endianness,
NoBufferPresent,
@@ -136,7 +136,7 @@ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:
kind = _NP_KINDS.get(dtype.kind, None)
if kind is None:
# Not a NumPy dtype. Check if it's a categorical maybe
- raise ValueError(f"Data type {dtype} not supported by exchange protocol")
+ raise ValueError(f"Data type {dtype} not supported by interchange protocol")
return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), dtype.byteorder
diff --git a/pandas/core/exchange/dataframe.py b/pandas/core/interchange/dataframe.py
similarity index 96%
rename from pandas/core/exchange/dataframe.py
rename to pandas/core/interchange/dataframe.py
index e5bb3811afed0..ddcffbff64670 100644
--- a/pandas/core/exchange/dataframe.py
+++ b/pandas/core/interchange/dataframe.py
@@ -4,8 +4,8 @@
from typing import TYPE_CHECKING
import pandas as pd
-from pandas.core.exchange.column import PandasColumn
-from pandas.core.exchange.dataframe_protocol import DataFrame as DataFrameXchg
+from pandas.core.interchange.column import PandasColumn
+from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg
if TYPE_CHECKING:
from pandas import Index
diff --git a/pandas/core/exchange/dataframe_protocol.py b/pandas/core/interchange/dataframe_protocol.py
similarity index 99%
rename from pandas/core/exchange/dataframe_protocol.py
rename to pandas/core/interchange/dataframe_protocol.py
index 367b906332741..036f84a393903 100644
--- a/pandas/core/exchange/dataframe_protocol.py
+++ b/pandas/core/interchange/dataframe_protocol.py
@@ -389,7 +389,7 @@ class DataFrame(ABC):
@abstractmethod
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
- """Construct a new exchange object, potentially changing the parameters."""
+ """Construct a new interchange object, potentially changing the parameters."""
pass
@property
diff --git a/pandas/core/exchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py
similarity index 97%
rename from pandas/core/exchange/from_dataframe.py
rename to pandas/core/interchange/from_dataframe.py
index a33e47ba3b68e..ae9b39de54d41 100644
--- a/pandas/core/exchange/from_dataframe.py
+++ b/pandas/core/interchange/from_dataframe.py
@@ -7,14 +7,14 @@
import numpy as np
import pandas as pd
-from pandas.core.exchange.dataframe_protocol import (
+from pandas.core.interchange.dataframe_protocol import (
Buffer,
Column,
ColumnNullType,
DataFrame as DataFrameXchg,
DtypeKind,
)
-from pandas.core.exchange.utils import (
+from pandas.core.interchange.utils import (
ArrowCTypes,
Endianness,
)
@@ -34,7 +34,7 @@ def from_dataframe(df, allow_copy=True) -> pd.DataFrame:
Parameters
----------
df : DataFrameXchg
- Object supporting the exchange protocol, i.e. `__dataframe__` method.
+ Object supporting the interchange protocol, i.e. `__dataframe__` method.
allow_copy : bool, default: True
Whether to allow copying the memory to perform the conversion
(if false then zero-copy approach is requested).
@@ -54,12 +54,12 @@ def from_dataframe(df, allow_copy=True) -> pd.DataFrame:
def _from_dataframe(df: DataFrameXchg, allow_copy=True):
"""
- Build a ``pd.DataFrame`` from the DataFrame exchange object.
+ Build a ``pd.DataFrame`` from the DataFrame interchange object.
Parameters
----------
df : DataFrameXchg
- Object supporting the exchange protocol, i.e. `__dataframe__` method.
+ Object supporting the interchange protocol, i.e. `__dataframe__` method.
allow_copy : bool, default: True
Whether to allow copying the memory to perform the conversion
(if false then zero-copy approach is requested).
@@ -91,7 +91,7 @@ def _from_dataframe(df: DataFrameXchg, allow_copy=True):
def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
"""
- Convert exchange protocol chunk to ``pd.DataFrame``.
+ Convert interchange protocol chunk to ``pd.DataFrame``.
Parameters
----------
diff --git a/pandas/core/exchange/utils.py b/pandas/core/interchange/utils.py
similarity index 96%
rename from pandas/core/exchange/utils.py
rename to pandas/core/interchange/utils.py
index 2cc5126591718..1d56af94b2629 100644
--- a/pandas/core/exchange/utils.py
+++ b/pandas/core/interchange/utils.py
@@ -1,5 +1,5 @@
"""
-Utility functions and objects for implementing the exchange API.
+Utility functions and objects for implementing the interchange API.
"""
from __future__ import annotations
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 6350f402ac0e5..c62a86e1983f5 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -277,7 +277,7 @@ def test_np():
class TestApi(Base):
- allowed = ["types", "extensions", "indexers", "exchange"]
+ allowed = ["types", "extensions", "indexers", "interchange"]
def test_api(self):
self.check(api, self.allowed)
diff --git a/pandas/tests/exchange/__init__.py b/pandas/tests/interchange/__init__.py
similarity index 100%
rename from pandas/tests/exchange/__init__.py
rename to pandas/tests/interchange/__init__.py
diff --git a/pandas/tests/exchange/conftest.py b/pandas/tests/interchange/conftest.py
similarity index 100%
rename from pandas/tests/exchange/conftest.py
rename to pandas/tests/interchange/conftest.py
diff --git a/pandas/tests/exchange/test_impl.py b/pandas/tests/interchange/test_impl.py
similarity index 97%
rename from pandas/tests/exchange/test_impl.py
rename to pandas/tests/interchange/test_impl.py
index e0e9fdce645d0..5168e1acc8e7e 100644
--- a/pandas/tests/exchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -6,11 +6,11 @@
import pandas as pd
import pandas._testing as tm
-from pandas.core.exchange.dataframe_protocol import (
+from pandas.core.interchange.dataframe_protocol import (
ColumnNullType,
DtypeKind,
)
-from pandas.core.exchange.from_dataframe import from_dataframe
+from pandas.core.interchange.from_dataframe import from_dataframe
test_data_categorical = {
"ordered": pd.Categorical(list("testdata") * 30, ordered=True),
diff --git a/pandas/tests/exchange/test_spec_conformance.py b/pandas/tests/interchange/test_spec_conformance.py
similarity index 100%
rename from pandas/tests/exchange/test_spec_conformance.py
rename to pandas/tests/interchange/test_spec_conformance.py
diff --git a/pandas/tests/exchange/test_utils.py b/pandas/tests/interchange/test_utils.py
similarity index 95%
rename from pandas/tests/exchange/test_utils.py
rename to pandas/tests/interchange/test_utils.py
index 4c80ecf0d23a0..4fd42abb7f3f1 100644
--- a/pandas/tests/exchange/test_utils.py
+++ b/pandas/tests/interchange/test_utils.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas.core.exchange.utils import dtype_to_arrow_c_fmt
+from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
# TODO: use ArrowSchema to get reference C-string.
# At the time, there is no way to access ArrowSchema holding a type format string
| Recommended by @vnlitvinov
| https://api.github.com/repos/pandas-dev/pandas/pulls/47888 | 2022-07-28T20:54:04Z | 2022-07-30T00:45:46Z | 2022-07-30T00:45:45Z | 2022-07-30T17:17:42Z |
REF: Change _NULL_DESCRIPTION[datetime] to use NaT sentinel | diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index 9ef73aa1f40e0..9ba237a94b8fe 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -5,6 +5,7 @@
import numpy as np
from pandas._libs.lib import infer_dtype
+from pandas._libs.tslibs import iNaT
from pandas.util._decorators import cache_readonly
import pandas as pd
@@ -38,7 +39,7 @@
_NULL_DESCRIPTION = {
DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None),
- DtypeKind.DATETIME: (ColumnNullType.USE_NAN, None),
+ DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT),
DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None),
DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None),
DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None),
diff --git a/pandas/core/interchange/dataframe_protocol.py b/pandas/core/interchange/dataframe_protocol.py
index 036f84a393903..6f4ec673f03e0 100644
--- a/pandas/core/interchange/dataframe_protocol.py
+++ b/pandas/core/interchange/dataframe_protocol.py
@@ -70,7 +70,7 @@ class ColumnNullType(enum.IntEnum):
NON_NULLABLE : int
Non-nullable column.
USE_NAN : int
- Use explicit float NaN/NaT value.
+ Use explicit float NaN value.
USE_SENTINEL : int
Sentinel value besides NaN/NaT.
USE_BITMASK : int
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 4c4c2a99c5558..0c2cffff8a159 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -4,6 +4,8 @@
import numpy as np
import pytest
+from pandas._libs.tslibs import iNaT
+
import pandas as pd
import pandas._testing as tm
from pandas.core.interchange.dataframe_protocol import (
@@ -176,3 +178,15 @@ def test_nonstring_object():
col = df.__dataframe__().get_column_by_name("A")
with pytest.raises(NotImplementedError, match="not supported yet"):
col.dtype
+
+
+def test_datetime():
+ df = pd.DataFrame({"A": [pd.Timestamp("2022-01-01"), pd.NaT]})
+ col = df.__dataframe__().get_column_by_name("A")
+
+ assert col.size == 2
+ assert col.null_count == 1
+ assert col.dtype[0] == DtypeKind.DATETIME
+ assert col.describe_null == (ColumnNullType.USE_SENTINEL, iNaT)
+
+ tm.assert_frame_equal(df, from_dataframe(df.__dataframe__()))
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
xref https://github.com/data-apis/dataframe-api/pull/74#discussion_r921464588
cc @vnlitvinov @jorisvandenbossche
| https://api.github.com/repos/pandas-dev/pandas/pulls/47887 | 2022-07-28T20:44:22Z | 2022-08-10T18:33:38Z | 2022-08-10T18:33:38Z | 2022-08-10T18:33:42Z |
REF: PandasColumn.describe_categorical returns categores instead of mapping | diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index 9ef73aa1f40e0..2d0aaa6b7c616 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -145,15 +145,18 @@ def describe_categorical(self):
"""
If the dtype is categorical, there are two options:
- There are only values in the data buffer.
- - There is a separate dictionary-style encoding for categorical values.
- Raises RuntimeError if the dtype is not categorical
+ - There is a separate non-categorical Column encoding for categorical values.
+
+ Raises TypeError if the dtype is not categorical
+
Content of returned dict:
- "is_ordered" : bool, whether the ordering of dictionary indices is
semantically meaningful.
- "is_dictionary" : bool, whether a dictionary-style mapping of
categorical values to other objects exists
- - "mapping" : dict, Python-level only (e.g. ``{int: str}``).
- None if not a dictionary-style categorical.
+ - "categories" : Column representing the (implicit) mapping of indices to
+ category values (e.g. an array of cat1, cat2, ...).
+ None if not a dictionary-style categorical.
"""
if not self.dtype[0] == DtypeKind.CATEGORICAL:
raise TypeError(
@@ -163,7 +166,7 @@ def describe_categorical(self):
return {
"is_ordered": self._col.cat.ordered,
"is_dictionary": True,
- "mapping": dict(enumerate(self._col.cat.categories)),
+ "categories": PandasColumn(pd.Series(self._col.cat.categories)),
}
@property
diff --git a/pandas/core/interchange/dataframe_protocol.py b/pandas/core/interchange/dataframe_protocol.py
index 036f84a393903..24ae6f846edf6 100644
--- a/pandas/core/interchange/dataframe_protocol.py
+++ b/pandas/core/interchange/dataframe_protocol.py
@@ -110,7 +110,7 @@ class CategoricalDescription(TypedDict):
is_dictionary: bool
# Python-level only (e.g. ``{int: str}``).
# None if not a dictionary-style categorical.
- mapping: dict | None
+ categories: Column | None
class Buffer(ABC):
@@ -274,17 +274,18 @@ def describe_categorical(self) -> CategoricalDescription:
"""
If the dtype is categorical, there are two options:
- There are only values in the data buffer.
- - There is a separate dictionary-style encoding for categorical values.
+ - There is a separate non-categorical Column encoding for categorical values.
Raises TypeError if the dtype is not categorical
Returns the dictionary with description on how to interpret the data buffer:
- "is_ordered" : bool, whether the ordering of dictionary indices is
semantically meaningful.
- - "is_dictionary" : bool, whether a dictionary-style mapping of
+ - "is_dictionary" : bool, whether a mapping of
categorical values to other objects exists
- - "mapping" : dict, Python-level only (e.g. ``{int: str}``).
- None if not a dictionary-style categorical.
+ - "categories" : Column representing the (implicit) mapping of indices to
+ category values (e.g. an array of cat1, cat2, ...).
+ None if not a dictionary-style categorical.
TBD: are there any other in-memory representations that are needed?
"""
diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py
index a430e0c66a988..6e1b2de10e8e6 100644
--- a/pandas/core/interchange/from_dataframe.py
+++ b/pandas/core/interchange/from_dataframe.py
@@ -7,6 +7,7 @@
import numpy as np
import pandas as pd
+from pandas.core.interchange.column import PandasColumn
from pandas.core.interchange.dataframe_protocol import (
Buffer,
Column,
@@ -179,9 +180,10 @@ def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:
if not categorical["is_dictionary"]:
raise NotImplementedError("Non-dictionary categoricals not supported yet")
- mapping = categorical["mapping"]
- assert isinstance(mapping, dict), "Categorical mapping must be a dict"
- categories = np.array(tuple(mapping[k] for k in sorted(mapping)))
+ cat_column = categorical["categories"]
+ # for mypy/pyright
+ assert isinstance(cat_column, PandasColumn), "categories must be a PandasColumn"
+ categories = np.array(cat_column._col)
buffers = col.get_buffers()
codes_buff, codes_dtype = buffers["data"]
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 4c4c2a99c5558..90b8b64a11242 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -6,6 +6,7 @@
import pandas as pd
import pandas._testing as tm
+from pandas.core.interchange.column import PandasColumn
from pandas.core.interchange.dataframe_protocol import (
ColumnNullType,
DtypeKind,
@@ -59,11 +60,13 @@ def test_categorical_dtype(data):
assert col.null_count == 0
assert col.describe_null == (ColumnNullType.USE_SENTINEL, -1)
assert col.num_chunks() == 1
- assert col.describe_categorical == {
- "is_ordered": data[1],
- "is_dictionary": True,
- "mapping": {0: "a", 1: "d", 2: "e", 3: "s", 4: "t"},
- }
+ desc_cat = col.describe_categorical
+ assert desc_cat["is_ordered"] == data[1]
+ assert desc_cat["is_dictionary"] is True
+ assert isinstance(desc_cat["categories"], PandasColumn)
+ tm.assert_series_equal(
+ desc_cat["categories"]._col, pd.Series(["a", "d", "e", "s", "t"])
+ )
tm.assert_frame_equal(df, from_dataframe(df.__dataframe__()))
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
xref: https://github.com/data-apis/dataframe-api/pull/69
cc @shwina @vnlitvinov | https://api.github.com/repos/pandas-dev/pandas/pulls/47886 | 2022-07-28T20:24:59Z | 2022-08-12T00:04:55Z | 2022-08-12T00:04:55Z | 2022-08-12T00:15:45Z |
DOC: Add numpydoc SS06 validation | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index b42ffc66f7714..113186c746157 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -78,8 +78,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05
+ MSG='Validate docstrings (EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index eacbf1b016432..d5e77d824340d 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -102,8 +102,9 @@ class RegisteredOption(NamedTuple):
class OptionError(AttributeError, KeyError):
"""
- Exception for pandas.options, backwards compatible with KeyError
- checks.
+ Exception raised for pandas.options.
+
+ Backwards compatible with KeyError checks.
"""
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index bc0a63c5c5a33..ec1dbff6903e7 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -352,8 +352,9 @@ cdef class Interval(IntervalMixin):
cdef readonly str inclusive
"""
- Whether the interval is inclusive on the left-side, right-side, both or
- neither.
+ String describing the inclusive side of the intervals.
+
+ Either ``left``, ``right``, ``both`` or ``neither``.
"""
def __init__(self, left, right, inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default):
@@ -384,10 +385,11 @@ cdef class Interval(IntervalMixin):
@property
def closed(self):
"""
- Whether the interval is closed on the left-side, right-side, both or
- neither.
+ String describing the inclusive side of the intervals.
.. deprecated:: 1.5.0
+
+ Either ``left``, ``right``, ``both`` or ``neither``.
"""
warnings.warn(
"Attribute `closed` is deprecated in favor of `inclusive`.",
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index e353d224708b7..c90c9003c8d60 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1347,8 +1347,7 @@ cdef object _try_infer_map(object dtype):
def infer_dtype(value: object, skipna: bool = True) -> str:
"""
- Efficiently infer the type of a passed val, or list-like
- array of values. Return a string describing the type.
+ Return a string label of the type of a scalar or list-like of values.
Parameters
----------
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 93687abdf9153..b05b0ba636251 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -446,6 +446,7 @@ class NaTType(_NaT):
"weekday",
"""
Return the day of the week represented by the date.
+
Monday == 0 ... Sunday == 6.
""",
)
@@ -453,6 +454,7 @@ class NaTType(_NaT):
"isoweekday",
"""
Return the day of the week represented by the date.
+
Monday == 1 ... Sunday == 7.
""",
)
@@ -533,10 +535,7 @@ class NaTType(_NaT):
strftime = _make_error_func(
"strftime",
"""
- Timestamp.strftime(format)
-
- Return a string representing the given POSIX timestamp
- controlled by an explicit format string.
+ Return a formatted string of the Timestamp.
Parameters
----------
@@ -680,10 +679,7 @@ class NaTType(_NaT):
fromordinal = _make_error_func(
"fromordinal",
"""
- Timestamp.fromordinal(ordinal, freq=None, tz=None)
-
- Passed an ordinal, translate and convert to a ts.
- Note: by definition there cannot be any tz info on the ordinal itself.
+ Construct a timestamp from a proleptic Gregorian ordinal.
Parameters
----------
@@ -694,6 +690,10 @@ class NaTType(_NaT):
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for the Timestamp.
+ Notes
+ -----
+ By definition there cannot be any tz info on the ordinal itself.
+
Examples
--------
>>> pd.Timestamp.fromordinal(737425)
@@ -725,10 +725,7 @@ class NaTType(_NaT):
now = _make_nat_func(
"now",
"""
- Timestamp.now(tz=None)
-
- Return new Timestamp object representing current time local to
- tz.
+ Return new Timestamp object representing current time local to tz.
Parameters
----------
@@ -749,10 +746,9 @@ class NaTType(_NaT):
today = _make_nat_func(
"today",
"""
- Timestamp.today(cls, tz=None)
+ Return the current time in the local timezone.
- Return the current time in the local timezone. This differs
- from datetime.today() in that it can be localized to a
+ This differs from datetime.today() in that it can be localized to a
passed timezone.
Parameters
@@ -1090,7 +1086,9 @@ timedelta}, default 'raise'
tz_localize = _make_nat_func(
"tz_localize",
"""
- Convert naive Timestamp to local time zone, or remove
+ Localize the Timestamp to a timezone.
+
+ Convert naive Timestamp to local time zone or remove
timezone from timezone-aware Timestamp.
Parameters
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 8ab0ba24f9151..c58a8d4dc4ba6 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -156,16 +156,16 @@ cdef inline bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1:
class OutOfBoundsDatetime(ValueError):
"""
- Raised when the datetime is outside the range that
- can be represented.
+ Raised when the datetime is outside the range that can be represented.
"""
pass
class OutOfBoundsTimedelta(ValueError):
"""
- Raised when encountering a timedelta value that cannot be represented
- as a timedelta64[ns].
+ Raised when encountering a timedelta value that cannot be represented.
+
+ Representation should be within a timedelta64[ns].
"""
# Timedelta analogue to OutOfBoundsDatetime
pass
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 5f4f6b998a60a..48104965ec42b 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -585,9 +585,7 @@ cdef class BaseOffset:
def apply_index(self, dtindex):
"""
- Vectorized apply of DateOffset to DatetimeIndex,
- raises NotImplementedError for offsets without a
- vectorized implementation.
+ Vectorized apply of DateOffset to DatetimeIndex.
.. deprecated:: 1.1.0
@@ -2448,8 +2446,7 @@ cdef class SemiMonthOffset(SingleConstructorOffset):
cdef class SemiMonthEnd(SemiMonthOffset):
"""
- Two DateOffset's per month repeating on the last
- day of the month and day_of_month.
+ Two DateOffset's per month repeating on the last day of the month & day_of_month.
Parameters
----------
@@ -2470,8 +2467,7 @@ cdef class SemiMonthEnd(SemiMonthOffset):
cdef class SemiMonthBegin(SemiMonthOffset):
"""
- Two DateOffset's per month repeating on the first
- day of the month and day_of_month.
+ Two DateOffset's per month repeating on the first day of the month & day_of_month.
Parameters
----------
@@ -2704,8 +2700,9 @@ cdef class WeekOfMonth(WeekOfMonthMixin):
cdef class LastWeekOfMonth(WeekOfMonthMixin):
"""
- Describes monthly dates in last week of month like "the last Tuesday of
- each month".
+ Describes monthly dates in last week of month.
+
+ For example "the last Tuesday of each month".
Parameters
----------
@@ -2991,8 +2988,9 @@ cdef class FY5253(FY5253Mixin):
cdef class FY5253Quarter(FY5253Mixin):
"""
- DateOffset increments between business quarter dates
- for 52-53 week fiscal year (also known as a 4-4-5 calendar).
+ DateOffset increments between business quarter dates for 52-53 week fiscal year.
+
+ Also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
@@ -3602,8 +3600,7 @@ def _get_offset(name: str) -> BaseOffset:
cpdef to_offset(freq):
"""
- Return DateOffset object from string or tuple representation
- or datetime.timedelta object.
+ Return DateOffset object from string or datetime.timedelta object.
Parameters
----------
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 3332628627739..3acaa024f52c2 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2322,12 +2322,12 @@ cdef class _Period(PeriodMixin):
def strftime(self, fmt: str) -> str:
r"""
- Returns the string representation of the :class:`Period`, depending
- on the selected ``fmt``. ``fmt`` must be a string
- containing one or several directives. The method recognizes the same
- directives as the :func:`time.strftime` function of the standard Python
- distribution, as well as the specific additional directives ``%f``,
- ``%F``, ``%q``, ``%l``, ``%u``, ``%n``.
+ Returns a formatted string representation of the :class:`Period`.
+
+ ``fmt`` must be a string containing one or several directives.
+ The method recognizes the same directives as the :func:`time.strftime`
+ function of the standard Python distribution, as well as the specific
+ additional directives ``%f``, ``%F``, ``%q``, ``%l``, ``%u``, ``%n``.
(formatting & docs originally from scikits.timeseries).
+-----------+--------------------------------+-------+
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 39458c10ad35b..215d1c9d6c722 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1464,7 +1464,8 @@ cdef class _Timedelta(timedelta):
def isoformat(self) -> str:
"""
- Format Timedelta as ISO 8601 Duration like
+ Format the Timedelta as ISO 8601 Duration.
+
``P[n]Y[n]M[n]DT[n]H[n]M[n]S``, where the ``[n]`` s are replaced by the
values. See https://en.wikipedia.org/wiki/ISO_8601#Durations.
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index dc4da6c9bf4d2..66d848ba43da9 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1345,10 +1345,7 @@ class Timestamp(_Timestamp):
@classmethod
def fromordinal(cls, ordinal, freq=None, tz=None):
"""
- Timestamp.fromordinal(ordinal, freq=None, tz=None)
-
- Passed an ordinal, translate and convert to a ts.
- Note: by definition there cannot be any tz info on the ordinal itself.
+ Construct a timestamp from a proleptic Gregorian ordinal.
Parameters
----------
@@ -1359,6 +1356,10 @@ class Timestamp(_Timestamp):
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for the Timestamp.
+ Notes
+ -----
+ By definition there cannot be any tz info on the ordinal itself.
+
Examples
--------
>>> pd.Timestamp.fromordinal(737425)
@@ -1370,10 +1371,7 @@ class Timestamp(_Timestamp):
@classmethod
def now(cls, tz=None):
"""
- Timestamp.now(tz=None)
-
- Return new Timestamp object representing current time local to
- tz.
+ Return new Timestamp object representing current time local to tz.
Parameters
----------
@@ -1397,10 +1395,9 @@ class Timestamp(_Timestamp):
@classmethod
def today(cls, tz=None):
"""
- Timestamp.today(cls, tz=None)
+ Return the current time in the local timezone.
- Return the current time in the local timezone. This differs
- from datetime.today() in that it can be localized to a
+ This differs from datetime.today() in that it can be localized to a
passed timezone.
Parameters
@@ -1477,10 +1474,7 @@ class Timestamp(_Timestamp):
def strftime(self, format):
"""
- Timestamp.strftime(format)
-
- Return a string representing the given POSIX timestamp
- controlled by an explicit format string.
+ Return a formatted string of the Timestamp.
Parameters
----------
@@ -2052,7 +2046,9 @@ timedelta}, default 'raise'
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise'):
"""
- Convert naive Timestamp to local time zone, or remove
+ Localize the Timestamp to a timezone.
+
+ Convert naive Timestamp to local time zone or remove
timezone from timezone-aware Timestamp.
Parameters
@@ -2343,6 +2339,7 @@ default 'raise'
def to_julian_date(self) -> np.float64:
"""
Convert TimeStamp to a Julian Date.
+
0 Julian date is noon January 1, 4713 BC.
Examples
@@ -2374,6 +2371,7 @@ default 'raise'
def isoweekday(self):
"""
Return the day of the week represented by the date.
+
Monday == 1 ... Sunday == 7.
"""
# same as super().isoweekday(), but that breaks because of how
@@ -2383,6 +2381,7 @@ default 'raise'
def weekday(self):
"""
Return the day of the week represented by the date.
+
Monday == 0 ... Sunday == 6.
"""
# same as super().weekday(), but that breaks because of how
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index ea9414aaaa1a8..11c236836e791 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -953,9 +953,9 @@ def freqstr(self) -> str | None:
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self) -> str | None:
"""
- Tries to return a string representing a frequency guess,
- generated by infer_freq. Returns None if it can't autodetect the
- frequency.
+ Tries to return a string representing a frequency generated by infer_freq.
+
+ Returns None if it can't autodetect the frequency.
"""
if self.ndim != 1:
return None
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 7a56bba0e58b3..ffd093b86582c 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -810,8 +810,7 @@ def tz_convert(self, tz) -> DatetimeArray:
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
- Localize tz-naive Datetime Array/Index to tz-aware
- Datetime Array/Index.
+ Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
@@ -993,8 +992,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArr
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
- Return Datetime Array/Index as object ndarray of datetime.datetime
- objects.
+ Return an ndarray of datetime.datetime objects.
Returns
-------
@@ -1122,9 +1120,9 @@ def to_period(self, freq=None) -> PeriodArray:
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
- Calculate TimedeltaArray of difference between index
- values and index converted to PeriodArray at specified
- freq. Used for vectorized offsets.
+ Calculate deltas between self values and self converted to Periods at a freq.
+
+ Used for vectorized offsets.
Parameters
----------
@@ -1157,8 +1155,7 @@ def to_perioddelta(self, freq) -> TimedeltaArray:
def month_name(self, locale=None) -> npt.NDArray[np.object_]:
"""
- Return the month names of the :class:`~pandas.Series` or
- :class:`~pandas.DatetimeIndex` with specified locale.
+ Return the month names with specified locale.
Parameters
----------
@@ -1202,8 +1199,7 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
def day_name(self, locale=None) -> npt.NDArray[np.object_]:
"""
- Return the day names of the :class:`~pandas.Series` or
- :class:`~pandas.DatetimeIndex` with specified locale.
+ Return the day names with specified locale.
Parameters
----------
@@ -1262,8 +1258,7 @@ def time(self) -> npt.NDArray[np.object_]:
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
- Returns numpy array of :class:`datetime.time` objects with timezone
- information.
+ Returns numpy array of :class:`datetime.time` objects with timezones.
The time part of the Timestamps.
"""
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 6469dccf6e2d5..e7198a95c07f1 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1254,8 +1254,7 @@ def _format_space(self) -> str:
@property
def left(self):
"""
- Return the left endpoints of each Interval in the IntervalArray as
- an Index.
+ Return the left endpoints of each Interval in the IntervalArray as an Index.
"""
from pandas import Index
@@ -1264,8 +1263,7 @@ def left(self):
@property
def right(self):
"""
- Return the right endpoints of each Interval in the IntervalArray as
- an Index.
+ Return the right endpoints of each Interval in the IntervalArray as an Index.
"""
from pandas import Index
@@ -1274,8 +1272,7 @@ def right(self):
@property
def length(self) -> Index:
"""
- Return an Index with entries denoting the length of each Interval in
- the IntervalArray.
+ Return an Index with entries denoting the length of each Interval.
"""
return self.right - self.left
@@ -1367,16 +1364,18 @@ def overlaps(self, other):
@property
def inclusive(self) -> IntervalInclusiveType:
"""
- Whether the intervals are inclusive on the left-side, right-side, both or
- neither.
+ String describing the inclusive side of the intervals.
+
+ Either ``left``, ``right``, ``both`` or ``neither``.
"""
return self.dtype.inclusive
@property
def closed(self) -> IntervalInclusiveType:
"""
- Whether the intervals are closed on the left-side, right-side, both or
- neither.
+ String describing the inclusive side of the intervals.
+
+ Either ``left``, ``right``, ``both`` or ``neither``.
"""
warnings.warn(
"Attribute `closed` is deprecated in favor of `inclusive`.",
@@ -1387,8 +1386,7 @@ def closed(self) -> IntervalInclusiveType:
_interval_shared_docs["set_closed"] = textwrap.dedent(
"""
- Return an %(klass)s identical to the current one, but closed on the
- specified side.
+ Return an identical %(klass)s closed on the specified side.
.. deprecated:: 1.5.0
@@ -1440,8 +1438,7 @@ def set_closed(
_interval_shared_docs["set_inclusive"] = textwrap.dedent(
"""
- Return an %(klass)s identical to the current one, but closed on the
- specified side.
+ Return an identical %(klass)s but closed on the specified side.
.. versionadded:: 1.5
@@ -1497,9 +1494,10 @@ def set_inclusive(
_interval_shared_docs[
"is_non_overlapping_monotonic"
] = """
- Return True if the %(klass)s is non-overlapping (no Intervals share
- points) and is either monotonic increasing or monotonic decreasing,
- else False.
+ Return a boolean whether the %(klass)s is non-overlapping and monotonic.
+
+ Non-overlapping means (no Intervals share points), and monotonic means
+ either monotonic increasing or monotonic decreasing.
"""
# https://github.com/python/mypy/issues/1362
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 5f227cb45a65b..4011f29855949 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -771,8 +771,7 @@ def total_seconds(self) -> npt.NDArray[np.float64]:
def to_pytimedelta(self) -> npt.NDArray[np.object_]:
"""
- Return Timedelta Array/Index as object ndarray of datetime.timedelta
- objects.
+ Return an ndarray of datetime.timedelta objects.
Returns
-------
@@ -800,8 +799,10 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
@property
def components(self) -> DataFrame:
"""
- Return a dataframe of the components (days, hours, minutes,
- seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
+ Return a DataFrame of the individual resolution components of the Timedeltas.
+
+ The components (days, hours, minutes seconds, milliseconds, microseconds,
+ nanoseconds) are returned as columns in a DataFrame.
Returns
-------
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 2fa3f57f950b5..f7e6c4434da32 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1054,8 +1054,7 @@ def is_unique(self) -> bool:
@property
def is_monotonic(self) -> bool:
"""
- Return boolean if values in the object are
- monotonic_increasing.
+ Return boolean if values in the object are monotonically increasing.
Returns
-------
@@ -1072,8 +1071,7 @@ def is_monotonic(self) -> bool:
@property
def is_monotonic_increasing(self) -> bool:
"""
- Return boolean if values in the object are
- monotonic_increasing.
+ Return boolean if values in the object are monotonically increasing.
Returns
-------
@@ -1086,8 +1084,7 @@ def is_monotonic_increasing(self) -> bool:
@property
def is_monotonic_decreasing(self) -> bool:
"""
- Return boolean if values in the object are
- monotonic_decreasing.
+ Return boolean if values in the object are monotonically decreasing.
Returns
-------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a541cbfe502fb..1c281f571d422 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4837,8 +4837,6 @@ def lookup(
) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
- Given equal-length arrays of row and column labels, return an
- array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
@@ -4846,6 +4844,9 @@ def lookup(
For further details see
:ref:`Looking up values by index/column labels <indexing.lookup>`.
+ Given equal-length arrays of row and column labels, return an
+ array of the values corresponding to each (row, col) pair.
+
Parameters
----------
row_labels : sequence
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0a439faed0896..a033b7a3f83d7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9970,13 +9970,14 @@ def shift(
def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT:
"""
Equivalent to `shift` without copying data.
- The shifted data will not include the dropped periods and the
- shifted axis will be smaller than the original.
.. deprecated:: 1.2.0
slice_shift is deprecated,
use DataFrame/Series.shift instead.
+ The shifted data will not include the dropped periods and the
+ shifted axis will be smaller than the original.
+
Parameters
----------
periods : int
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 9b4991d32692b..8eac2260448ab 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -329,8 +329,7 @@ class providing the base-class of operations.
"""
_pipe_template = """
-Apply a function `func` with arguments to this %(klass)s object and return
-the function's result.
+Apply a ``func`` with arguments to this %(klass)s object and return its result.
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
@@ -381,8 +380,9 @@ class providing the base-class of operations.
"""
_transform_template = """
-Call function producing a same-indexed %(klass)s on each group and
-return a %(klass)s having the same indexes as the original object
+Call function producing a same-indexed %(klass)s on each group.
+
+Returns a %(klass)s having the same indexes as the original object
filled with the transformed values.
Parameters
diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py
index beb4db644dd79..c15cbf368c159 100644
--- a/pandas/core/indexers/objects.py
+++ b/pandas/core/indexers/objects.py
@@ -238,8 +238,7 @@ def get_window_bounds(
class FixedForwardWindowIndexer(BaseIndexer):
"""
- Creates window boundaries for fixed-length windows that include the
- current row.
+ Creates window boundaries for fixed-length windows that include the current row.
Examples
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 239e6656ea151..26b833f78bec6 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -271,8 +271,9 @@ def _new_Index(cls, d):
class Index(IndexOpsMixin, PandasObject):
"""
- Immutable sequence used for indexing and alignment. The basic object
- storing axis labels for all pandas objects.
+ Immutable sequence used for indexing and alignment.
+
+ The basic object storing axis labels for all pandas objects.
Parameters
----------
@@ -2290,8 +2291,7 @@ def is_monotonic(self) -> bool:
@property
def is_monotonic_increasing(self) -> bool:
"""
- Return if the index is monotonic increasing (only equal or
- increasing) values.
+ Return a boolean if the values are equal or increasing.
Examples
--------
@@ -2307,8 +2307,7 @@ def is_monotonic_increasing(self) -> bool:
@property
def is_monotonic_decreasing(self) -> bool:
"""
- Return if the index is monotonic decreasing (only equal or
- decreasing) values.
+ Return a boolean if the values are equal or decreasing.
Examples
--------
@@ -3810,8 +3809,9 @@ def get_loc(self, key, method=None, tolerance=None):
_index_shared_docs[
"get_indexer"
] = """
- Compute indexer and mask for new index given the current index. The
- indexer should be then used as an input to ndarray.take to align the
+ Compute indexer and mask for new index given the current index.
+
+ The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
@@ -4580,8 +4580,7 @@ def join(
sort: bool = False,
) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
- Compute join_index and indexers to conform data
- structures to the new index.
+ Compute join_index and indexers to conform data structures to the new index.
Parameters
----------
@@ -5978,8 +5977,9 @@ def set_value(self, arr, key, value) -> None:
_index_shared_docs[
"get_indexer_non_unique"
] = """
- Compute indexer and mask for new index given the current index. The
- indexer should be then used as an input to ndarray.take to align the
+ Compute indexer and mask for new index given the current index.
+
+ The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 3a7adb19f1c01..30c770f32c2dc 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -496,8 +496,9 @@ def _get_time_micros(self) -> npt.NDArray[np.int64]:
def to_series(self, keep_tz=lib.no_default, index=None, name=None):
"""
- Create a Series with both index and values equal to the index keys
- useful with map for returning an indexer based on an index.
+ Create a Series with both index and values equal to the index keys.
+
+ Useful with map for returning an indexer based on an index.
Parameters
----------
@@ -826,8 +827,7 @@ def inferred_type(self) -> str:
def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
"""
- Return index locations of values at particular time of day
- (e.g. 9:30AM).
+ Return index locations of values at particular time of day.
Parameters
----------
@@ -867,8 +867,7 @@ def indexer_between_time(
self, start_time, end_time, include_start: bool = True, include_end: bool = True
) -> npt.NDArray[np.intp]:
"""
- Return index locations of values between particular times of day
- (e.g., 9:00-9:30AM).
+ Return index locations of values between particular times of day.
Parameters
----------
@@ -1134,8 +1133,7 @@ def bdate_range(
**kwargs,
) -> DatetimeIndex:
"""
- Return a fixed frequency DatetimeIndex, with business day as the default
- frequency.
+ Return a fixed frequency DatetimeIndex with business day as the default.
Parameters
----------
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index fd6b6ba63d7e0..a0398859abc2b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1567,8 +1567,7 @@ def _get_level_number(self, level) -> int:
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
- return if the index is monotonic increasing (only equal or
- increasing) values.
+ Return a boolean if the values are equal or increasing.
"""
if any(-1 in code for code in self.codes):
return False
@@ -1600,8 +1599,7 @@ def is_monotonic_increasing(self) -> bool:
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
- return if the index is monotonic decreasing (only equal or
- decreasing) values.
+ Return a boolean if the values are equal or decreasing.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 5731d476cef10..a597bea0eb724 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -42,9 +42,10 @@
class NumericIndex(Index):
"""
- Immutable sequence used for indexing and alignment. The basic object
- storing axis labels for all pandas objects. NumericIndex is a special case
- of `Index` with purely numpy int/uint/float labels.
+ Immutable numeric sequence used for indexing and alignment.
+
+ The basic object storing axis labels for all pandas objects.
+ NumericIndex is a special case of `Index` with purely numpy int/uint/float labels.
.. versionadded:: 1.4.0
@@ -309,14 +310,15 @@ def _format_native_types(
_num_index_shared_docs[
"class_descr"
] = """
- Immutable sequence used for indexing and alignment. The basic object
- storing axis labels for all pandas objects. %(klass)s is a special case
- of `Index` with purely %(ltype)s labels. %(extra)s.
+ Immutable sequence used for indexing and alignment.
.. deprecated:: 1.4.0
In pandas v2.0 %(klass)s will be removed and :class:`NumericIndex` used instead.
%(klass)s will remain fully functional for the duration of pandas 1.x.
+ The basic object storing axis labels for all pandas objects.
+ %(klass)s is a special case of `Index` with purely %(ltype)s labels. %(extra)s.
+
Parameters
----------
data : array-like (1-dimensional)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 095c5d1b1ba03..12a8f2c0d5a9d 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -47,8 +47,9 @@
)
class TimedeltaIndex(DatetimeTimedeltaMixin):
"""
- Immutable ndarray of timedelta64 data, represented internally as int64, and
- which can be boxed to timedelta objects.
+ Immutable Index of timedelta64 data.
+
+ Represented internally as int64, and scalars returned Timedelta objects.
Parameters
----------
@@ -209,8 +210,7 @@ def timedelta_range(
closed=None,
) -> TimedeltaIndex:
"""
- Return a fixed frequency TimedeltaIndex, with day as the default
- frequency.
+ Return a fixed frequency TimedeltaIndex with day as the default.
Parameters
----------
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 917382544199a..8797324166745 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -362,8 +362,9 @@ def aggregate(self, func=None, *args, **kwargs):
def transform(self, arg, *args, **kwargs):
"""
- Call function producing a like-indexed Series on each group and return
- a Series with the transformed values.
+ Call function producing a like-indexed Series on each group.
+
+ Return a Series with the transformed values.
Parameters
----------
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 523cd56db3e0a..5328c7995ea6f 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -157,8 +157,9 @@ def concat(
copy: bool = True,
) -> DataFrame | Series:
"""
- Concatenate pandas objects along a particular axis with optional set logic
- along the other axes.
+ Concatenate pandas objects along a particular axis.
+
+ Allows optional set logic along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 03aad0ef64dec..5226c928c6f73 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -537,9 +537,10 @@ def crosstab(
normalize=False,
) -> DataFrame:
"""
- Compute a simple cross tabulation of two (or more) factors. By default
- computes a frequency table of the factors unless an array of values and an
- aggregation function are passed.
+ Compute a simple cross tabulation of two (or more) factors.
+
+ By default, computes a frequency table of the factors unless an
+ array of values and an aggregation function are passed.
Parameters
----------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 67cdb5d8d72ab..765bf9f7e04f1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2642,6 +2642,7 @@ def quantile(
def corr(self, other, method="pearson", min_periods=None) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
+
The two `Series` objects are not required to be the same length and will be
aligned internally before the correlation function is applied.
@@ -2716,6 +2717,7 @@ def cov(
) -> float:
"""
Compute covariance with Series, excluding missing values.
+
The two `Series` objects are not required to be the same length and
will be aligned internally before the covariance is calculated.
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 73d5c04ecd652..d50daad9a22b1 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -2015,8 +2015,9 @@ def rstrip(self, to_strip=None):
_shared_docs[
"str_removefix"
] = r"""
- Remove a %(side)s from an object series. If the %(side)s is not present,
- the original string will be returned.
+ Remove a %(side)s from an object series.
+
+ If the %(side)s is not present, the original string will be returned.
Parameters
----------
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index e3f7e9d454383..d7f5e7aab58ab 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -15,21 +15,17 @@
class IntCastingNaNError(ValueError):
"""
- Raised when attempting an astype operation on an array with NaN to an integer
- dtype.
+ Exception raised when converting (``astype``) an array with NaN to an integer type.
"""
- pass
-
class NullFrequencyError(ValueError):
"""
- Error raised when a null `freq` attribute is used in an operation
- that needs a non-null frequency, particularly `DatetimeIndex.shift`,
- `TimedeltaIndex.shift`, `PeriodIndex.shift`.
- """
+ Exception raised when a ``freq`` cannot be null.
- pass
+ Particularly ``DatetimeIndex.shift``, ``TimedeltaIndex.shift``,
+ ``PeriodIndex.shift``.
+ """
class PerformanceWarning(Warning):
@@ -40,16 +36,17 @@ class PerformanceWarning(Warning):
class UnsupportedFunctionCall(ValueError):
"""
- Exception raised when attempting to call a numpy function
- on a pandas object, but that function is not supported by
- the object e.g. ``np.cumsum(groupby_object)``.
+ Exception raised when attempting to call a unsupported numpy function.
+
+ For example, ``np.cumsum(groupby_object)``.
"""
class UnsortedIndexError(KeyError):
"""
- Error raised when attempting to get a slice of a MultiIndex,
- and the index has not been lexsorted. Subclass of `KeyError`.
+ Error raised when slicing a MultiIndex which has not been lexsorted.
+
+ Subclass of `KeyError`.
"""
@@ -124,8 +121,7 @@ class DtypeWarning(Warning):
class EmptyDataError(ValueError):
"""
- Exception that is thrown in `pd.read_csv` (by both the C and
- Python engines) when empty data or header is encountered.
+ Exception raised in ``pd.read_csv`` when empty data or header is encountered.
"""
@@ -172,8 +168,9 @@ class ParserWarning(Warning):
class MergeError(ValueError):
"""
- Error raised when problems arise during merging due to problems
- with input data. Subclass of `ValueError`.
+ Exception raised when merging data.
+
+ Subclass of ``ValueError``.
"""
@@ -185,8 +182,7 @@ class AccessorRegistrationWarning(Warning):
class AbstractMethodError(NotImplementedError):
"""
- Raise this error instead of NotImplementedError for abstract methods
- while keeping compatibility with Python 2 and Python 3.
+ Raise this error instead of NotImplementedError for abstract methods.
"""
def __init__(self, class_instance, methodtype: str = "method") -> None:
@@ -243,17 +239,23 @@ class InvalidIndexError(Exception):
class DataError(Exception):
"""
- Exception raised when trying to perform a ohlc on a non-numnerical column.
- Or, it can be raised when trying to apply a function to a non-numerical
- column on a rolling window.
+ Exceptionn raised when performing an operation on non-numerical data.
+
+ For example, calling ``ohlc`` on a non-numerical column or a function
+ on a rolling window.
"""
class SpecificationError(Exception):
"""
- Exception raised in two scenarios. The first way is calling agg on a
+ Exception raised by ``agg`` when the functions are ill-specified.
+
+ The exception raised in two scenarios.
+
+ The first way is calling ``agg`` on a
Dataframe or Series using a nested renamer (dict-of-dict).
- The second way is calling agg on a Dataframe with duplicated functions
+
+ The second way is calling ``agg`` on a Dataframe with duplicated functions
names without assigning column name.
Examples
@@ -274,9 +276,10 @@ class SpecificationError(Exception):
class SettingWithCopyError(ValueError):
"""
- Exception is raised when trying to set on a copied slice from a dataframe and
- the mode.chained_assignment is set to 'raise.' This can happen unintentionally
- when chained indexing.
+ Exception raised when trying to set on a copied slice from a ``DataFrame``.
+
+ The ``mode.chained_assignment`` needs to be set to set to 'raise.' This can
+ happen unintentionally when chained indexing.
For more information on eveluation order,
see :ref:`the user guide<indexing.evaluation_order>`.
@@ -295,9 +298,11 @@ class SettingWithCopyError(ValueError):
class SettingWithCopyWarning(Warning):
"""
- Warning is raised when trying to set on a copied slice from a dataframe and
- the mode.chained_assignment is set to 'warn.' 'Warn' is the default option.
- This can happen unintentionally when chained indexing.
+ Warning raised when trying to set on a copied slice from a ``DataFrame``.
+
+ The ``mode.chained_assignment`` needs to be set to set to 'warn.'
+ 'Warn' is the default option. This can happen unintentionally when
+ chained indexing.
For more information on eveluation order,
see :ref:`the user guide<indexing.evaluation_order>`.
@@ -315,10 +320,11 @@ class SettingWithCopyWarning(Warning):
class NumExprClobberingError(NameError):
"""
- Exception is raised when trying to use a built-in numexpr name as a variable name
- in a method like query or eval. Eval will throw the error if the engine is set
- to 'numexpr'. 'numexpr' is the default engine value for eval if the numexpr package
- is installed.
+ Exception raised when trying to use a built-in numexpr name as a variable name.
+
+ ``eval`` or ``query`` will throw the error if the engine is set
+ to 'numexpr'. 'numexpr' is the default engine value for these methods if the
+ numexpr package is installed.
Examples
--------
@@ -333,9 +339,9 @@ class NumExprClobberingError(NameError):
class UndefinedVariableError(NameError):
"""
- Exception is raised when trying to use an undefined variable name in a method
- like query or eval. It will also specific whether the undefined variable is
- local or not.
+ Exception raised by ``query`` or ``eval`` when using an undefined variable name.
+
+ It will also specify whether the undefined variable is local or not.
Examples
--------
@@ -380,15 +386,18 @@ class IndexingError(Exception):
class PyperclipException(RuntimeError):
"""
- Exception is raised when trying to use methods like to_clipboard() and
- read_clipboard() on an unsupported OS/platform.
+ Exception raised when clipboard functionality is unsupported.
+
+ Raised by ``to_clipboard()`` and ``read_clipboard()``.
"""
class PyperclipWindowsException(PyperclipException):
"""
- Exception is raised when pandas is unable to get access to the clipboard handle
- due to some other window process is accessing it.
+ Exception raised when clipboard functionality is unsupported by Windows.
+
+ Access to the clipboard handle would be denied due to some other
+ window process is accessing it.
"""
def __init__(self, message: str) -> None:
@@ -400,6 +409,7 @@ def __init__(self, message: str) -> None:
class CSSWarning(UserWarning):
"""
Warning is raised when converting css styling fails.
+
This can be due to the styling not having an equivalent value or because the
styling isn't properly formatted.
@@ -417,8 +427,7 @@ class CSSWarning(UserWarning):
class PossibleDataLossError(Exception):
"""
- Exception is raised when trying to open a HDFStore file when the file is already
- opened.
+ Exception raised when trying to open a HDFStore file when already opened.
Examples
--------
@@ -443,14 +452,15 @@ class ClosedFileError(Exception):
class IncompatibilityWarning(Warning):
"""
- Warning is raised when trying to use where criteria on an incompatible
- HDF5 file.
+ Warning raised when trying to use where criteria on an incompatible HDF5 file.
"""
class AttributeConflictWarning(Warning):
"""
- Warning is raised when attempting to append an index with a different
+ Warning raised when index attributes conflict when using HDFStore.
+
+ Occurs when attempting to append an index with a different
name than the existing index on an HDFStore or attempting to append an index with a
different frequency than the existing index on an HDFStore.
"""
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 01b4812d3dc2a..9e3f54169d178 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1479,8 +1479,7 @@ def to_string(
def set_td_classes(self, classes: DataFrame) -> Styler:
"""
- Set the DataFrame of strings added to the ``class`` attribute of ``<td>``
- HTML elements.
+ Set the ``class`` attribute of ``<td>`` HTML elements.
Parameters
----------
@@ -3173,8 +3172,7 @@ def text_gradient(
@Substitution(subset=subset)
def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
"""
- Set defined CSS-properties to each ``<td>`` HTML element within the given
- subset.
+ Set defined CSS-properties to each ``<td>`` HTML element for the given subset.
Parameters
----------
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index dba161cf6d45c..414bd3b76bd0d 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -62,24 +62,18 @@ class CSSDict(TypedDict):
Subset = Union[slice, Sequence, Index]
-def _gl01_adjust(obj: Any) -> Any:
- """Adjust docstrings for Numpydoc GLO1."""
- obj.__doc__ = "\n" + obj.__doc__
- return obj
-
-
class StylerRenderer:
"""
Base class to process rendering a Styler with a specified jinja2 template.
"""
- loader = _gl01_adjust(jinja2.PackageLoader("pandas", "io/formats/templates"))
- env = _gl01_adjust(jinja2.Environment(loader=loader, trim_blocks=True))
- template_html = _gl01_adjust(env.get_template("html.tpl"))
- template_html_table = _gl01_adjust(env.get_template("html_table.tpl"))
- template_html_style = _gl01_adjust(env.get_template("html_style.tpl"))
- template_latex = _gl01_adjust(env.get_template("latex.tpl"))
- template_string = _gl01_adjust(env.get_template("string.tpl"))
+ loader = jinja2.PackageLoader("pandas", "io/formats/templates")
+ env = jinja2.Environment(loader=loader, trim_blocks=True)
+ template_html = env.get_template("html.tpl")
+ template_html_table = env.get_template("html_table.tpl")
+ template_html_style = env.get_template("html_style.tpl")
+ template_latex = env.get_template("latex.tpl")
+ template_string = env.get_template("string.tpl")
def __init__(
self,
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5c773a424a1c9..a4049eff8ae71 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1214,8 +1214,9 @@ def append(
errors: str = "strict",
) -> None:
"""
- Append to Table in file. Node must already exist and be Table
- format.
+ Append to Table in file.
+
+ Node must already exist and be Table format.
Parameters
----------
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 80b6db2500d28..3daa6d837349e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1968,8 +1968,7 @@ def data_label(self) -> str:
def variable_labels(self) -> dict[str, str]:
"""
- Return variable labels as a dict, associating each variable name
- with corresponding label.
+ Return a dict associating each variable name with corresponding label.
Returns
-------
@@ -1979,8 +1978,7 @@ def variable_labels(self) -> dict[str, str]:
def value_labels(self) -> dict[str, dict[float, str]]:
"""
- Return a dict, associating each variable name a dict, associating
- each value its corresponding label.
+ Return a nested dict associating each variable name to its value and label.
Returns
-------
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 17763b25329ab..5bd2e8a53e8e8 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -270,8 +270,7 @@ def andrews_curves(
**kwargs,
) -> Axes:
"""
- Generate a matplotlib plot of Andrews curves, for visualising clusters of
- multivariate data.
+ Generate a matplotlib plot for visualising clusters of multivariate data.
Andrews curves have the functional form:
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 873103b01f64d..1a07c02f4024a 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -320,6 +320,7 @@ def test_overlap_public_nat_methods(klass, expected):
_get_overlap_public_nat_methods(Timestamp, True)
+ _get_overlap_public_nat_methods(Timedelta, True)
),
+ ids=lambda x: f"{x[0].__name__}.{x[1]}",
)
def test_nat_doc_strings(compare):
# see gh-17327
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 3a0c437c918fb..cbf02bc0a0156 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -40,7 +40,16 @@
# With template backend, matplotlib plots nothing
matplotlib.use("template")
-
+# Styler methods are Jinja2 objects who's docstrings we don't own.
+IGNORE_VALIDATION = {
+ "Styler.env",
+ "Styler.template_html",
+ "Styler.template_html_style",
+ "Styler.template_html_table",
+ "Styler.template_latex",
+ "Styler.template_string",
+ "Styler.loader",
+}
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
ERROR_MSGS = {
"GL04": "Private classes ({mentioned_private_classes}) should not be "
@@ -121,6 +130,8 @@ def get_api_items(api_doc_fd):
position = None
continue
item = line.strip()
+ if item in IGNORE_VALIDATION:
+ continue
func = importlib.import_module(current_module)
for part in item.split("."):
func = getattr(func, part)
@@ -230,7 +241,8 @@ def pandas_validate(func_name: str):
Information about the docstring and the errors found.
"""
func_obj = Validator._load_obj(func_name)
- doc_obj = get_doc_object(func_obj)
+ # Some objects are instances, e.g. IndexSlice, which numpydoc can't validate
+ doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__)
doc = PandasDocstring(func_name, doc_obj)
result = validate(doc_obj)
| - [x] closes #29254 (Replace xxxx with the Github issue number)
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Also removed some functions that were modifying jinija2 docstrings to pass the validation (which we probably shouldn't be doing). https://github.com/pandas-dev/pandas/pull/44567#discussion_r757097753 | https://api.github.com/repos/pandas-dev/pandas/pulls/47885 | 2022-07-28T16:38:15Z | 2022-07-30T00:44:45Z | 2022-07-30T00:44:45Z | 2022-07-30T17:17:36Z |
BUG: fix Dataframe.join with categorical index leads to unexpected reordering | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index e57166f7a4861..786bdd502fb1b 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -1024,6 +1024,7 @@ Reshaping
- Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`)
- Bug in :meth:`concat` when ``axis=1`` and ``sort=False`` where the resulting Index was a :class:`Int64Index` instead of a :class:`RangeIndex` (:issue:`46675`)
- Bug in :meth:`wide_to_long` raises when ``stubnames`` is missing in columns and ``i`` contains string dtype column (:issue:`46044`)
+- Bug in :meth:`DataFrame.join` with categorical index results in unexpected reordering (:issue:`47812`)
Sparse
^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a212da050e1f1..239e6656ea151 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4682,6 +4682,7 @@ def join(
not isinstance(self, ABCMultiIndex)
or not any(is_categorical_dtype(dtype) for dtype in self.dtypes)
)
+ and not is_categorical_dtype(self.dtype)
):
# Categorical is monotonic if data are ordered as categories, but join can
# not handle this in case of not lexicographically monotonic GH#38502
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 905c2af2d22a5..d97c6a3dacdc3 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -712,6 +712,21 @@ def test_join_datetime_string(self):
)
tm.assert_frame_equal(result, expected)
+ def test_join_with_categorical_index(self):
+ # GH47812
+ ix = ["a", "b"]
+ id1 = pd.CategoricalIndex(ix, categories=ix)
+ id2 = pd.CategoricalIndex(reversed(ix), categories=reversed(ix))
+
+ df1 = DataFrame({"c1": ix}, index=id1)
+ df2 = DataFrame({"c2": reversed(ix)}, index=id2)
+ result = df1.join(df2)
+ expected = DataFrame(
+ {"c1": ["a", "b"], "c2": ["a", "b"]},
+ index=pd.CategoricalIndex(["a", "b"], categories=["a", "b"]),
+ )
+ tm.assert_frame_equal(result, expected)
+
def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"):
| - [ ] closes #47812(Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47881 | 2022-07-28T01:47:53Z | 2022-07-28T23:05:57Z | 2022-07-28T23:05:57Z | 2022-07-29T00:42:45Z |
DOC: Additions/updates to documentation-GH46359 | diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index aa469c2e07726..9c3158b3465b7 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -461,7 +461,7 @@ def make_flex_doc(op_name: str, typ: str) -> str:
Parameters
----------
-other : scalar, sequence, Series, or DataFrame
+other : scalar, sequence, Series, dict or DataFrame
Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}
Whether to compare by the index (0 or 'index') or columns.
@@ -556,6 +556,20 @@ def make_flex_doc(op_name: str, typ: str) -> str:
triangle 2 179
rectangle 3 359
+Multiply a dictionary by axis.
+
+>>> df.mul({{'angles': 0, 'degrees': 2}})
+ angles degrees
+circle 0 720
+triangle 0 360
+rectangle 0 720
+
+>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index')
+ angles degrees
+circle 0 0
+triangle 6 360
+rectangle 12 1080
+
Multiply a DataFrame of different shape with operator version.
>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},
| Added examples of DataFrame.mul with dicts.
- [x] closes #46359
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47877 | 2022-07-27T17:13:43Z | 2022-07-28T17:12:13Z | 2022-07-28T17:12:13Z | 2022-07-28T19:56:30Z |
DOC: fix broken link in getting started tutorials | diff --git a/doc/source/getting_started/intro_tutorials/10_text_data.rst b/doc/source/getting_started/intro_tutorials/10_text_data.rst
index 63db920164ac3..148ac246d7bf8 100644
--- a/doc/source/getting_started/intro_tutorials/10_text_data.rst
+++ b/doc/source/getting_started/intro_tutorials/10_text_data.rst
@@ -179,7 +179,7 @@ applied to integers, so no ``str`` is used.
Based on the index name of the row (``307``) and the column (``Name``),
we can do a selection using the ``loc`` operator, introduced in the
-`tutorial on subsetting <3_subset_data.ipynb>`__.
+:ref:`tutorial on subsetting <10min_tut_03_subset>`.
.. raw:: html
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This PR fixes a link in the getting started tutorial which previously pointed to a non-existing .ipynb file resulting in 404 error. | https://api.github.com/repos/pandas-dev/pandas/pulls/47876 | 2022-07-27T13:39:40Z | 2022-07-27T16:52:06Z | 2022-07-27T16:52:06Z | 2022-07-27T16:52:13Z |
DOC: Minor fixes in the IO user guide | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 7d1aa76613d33..25625dba1080f 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -107,9 +107,10 @@ index_col : int, str, sequence of int / str, or False, optional, default ``None`
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
- Note: ``index_col=False`` can be used to force pandas to *not* use the first
- column as the index, e.g. when you have a malformed file with delimiters at
- the end of each line.
+ .. note::
+ ``index_col=False`` can be used to force pandas to *not* use the first
+ column as the index, e.g. when you have a malformed file with delimiters at
+ the end of each line.
The default value of ``None`` instructs pandas to guess. If the number of
fields in the column header row is equal to the number of fields in the body
@@ -182,15 +183,16 @@ General parsing configuration
+++++++++++++++++++++++++++++
dtype : Type name or dict of column -> type, default ``None``
- Data type for data or columns. E.g. ``{'a': np.float64, 'b': np.int32}``
- (unsupported with ``engine='python'``). Use ``str`` or ``object`` together
- with suitable ``na_values`` settings to preserve and
- not interpret dtype.
+ Data type for data or columns. E.g. ``{'a': np.float64, 'b': np.int32, 'c': 'Int64'}``
+ Use ``str`` or ``object`` together with suitable ``na_values`` settings to preserve
+ and not interpret dtype. If converters are specified, they will be applied INSTEAD
+ of dtype conversion.
+
.. versionadded:: 1.5.0
- Support for defaultdict was added. Specify a defaultdict as input where
- the default determines the dtype of the columns which are not explicitly
- listed.
+ Support for defaultdict was added. Specify a defaultdict as input where
+ the default determines the dtype of the columns which are not explicitly
+ listed.
engine : {``'c'``, ``'python'``, ``'pyarrow'``}
Parser engine to use. The C and pyarrow engines are faster, while the python engine
is currently more feature-complete. Multithreading is currently only supported by
@@ -283,7 +285,9 @@ parse_dates : boolean or list of ints or names or list of lists or dict, default
* If ``[[1, 3]]`` -> combine columns 1 and 3 and parse as a single date
column.
* If ``{'foo': [1, 3]}`` -> parse columns 1, 3 as date and call result 'foo'.
- A fast-path exists for iso8601-formatted dates.
+
+ .. note::
+ A fast-path exists for iso8601-formatted dates.
infer_datetime_format : boolean, default ``False``
If ``True`` and parse_dates is enabled for a column, attempt to infer the
datetime format to speed up the processing.
@@ -1593,8 +1597,10 @@ of multi-columns indices.
pd.read_csv("mi2.csv", header=[0, 1], index_col=0)
-Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
-with ``df.to_csv(..., index=False)``, then any ``names`` on the columns index will be *lost*.
+.. note::
+ If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
+ with ``df.to_csv(..., index=False)``, then any ``names`` on the columns index will
+ be *lost*.
.. ipython:: python
:suppress:
| - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Removes a comment on unsupported ``engine='python'`` (added in #14295) in the io user guide (text now in sync with API reference); Additional minor formatting fixes | https://api.github.com/repos/pandas-dev/pandas/pulls/47875 | 2022-07-27T12:16:54Z | 2022-07-27T16:50:50Z | 2022-07-27T16:50:50Z | 2022-07-27T16:51:00Z |
DOC: Add sphinx-toggleprompt and sphinx-copybutton | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2a6ec8947c8d7..ad83e11171599 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -50,23 +50,25 @@
# sphinxext.
extensions = [
- "sphinx.ext.autodoc",
- "sphinx.ext.autosummary",
- "sphinx.ext.doctest",
- "sphinx.ext.extlinks",
- "sphinx.ext.todo",
- "numpydoc", # handle NumPy documentation formatted docstrings
+ "contributors", # custom pandas extension
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"matplotlib.sphinxext.plot_directive",
- "sphinx.ext.intersphinx",
+ "numpydoc",
+ "sphinx_copybutton",
+ "sphinx_panels",
+ "sphinx_toggleprompt",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
"sphinx.ext.coverage",
- "sphinx.ext.mathjax",
+ "sphinx.ext.doctest",
+ "sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
+ "sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
+ "sphinx.ext.mathjax",
+ "sphinx.ext.todo",
"nbsphinx",
- "sphinx_panels",
- "contributors", # custom pandas extension
]
exclude_patterns = [
@@ -144,6 +146,9 @@
# already loads it
panels_add_bootstrap_css = False
+# https://sphinx-toggleprompt.readthedocs.io/en/stable/#offset
+toggleprompt_offset_right = 35
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ["../_templates"]
diff --git a/environment.yml b/environment.yml
index eb4d53e116927..fd0822b13984e 100644
--- a/environment.yml
+++ b/environment.yml
@@ -103,6 +103,7 @@ dependencies:
- pytest-cython # doctest
- sphinx
- sphinx-panels
+ - sphinx-copybutton
- types-python-dateutil
- types-PyMySQL
- types-pytz
@@ -128,3 +129,4 @@ dependencies:
- jupyterlab >=3.4,<4
- pip:
- jupyterlite==0.1.0b10
+ - sphinx-toggleprompt
diff --git a/requirements-dev.txt b/requirements-dev.txt
index ff410c59b43dd..e1e98ef8ffe23 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -80,6 +80,7 @@ pydata-sphinx-theme==0.8.0
pytest-cython
sphinx
sphinx-panels
+sphinx-copybutton
types-python-dateutil
types-PyMySQL
types-pytz
@@ -98,4 +99,5 @@ pyyaml
requests
jupyterlab >=3.4,<4
jupyterlite==0.1.0b10
+sphinx-toggleprompt
setuptools>=51.0.0
| - [x] closes #47785 (Replace xxxx with the Github issue number)
- [x] closes #26790 (Replace xxxx with the Github issue number)
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Also sorts the sphinx extensions alphabetically.
<img width="763" alt="Screen Shot 2022-07-26 at 5 08 36 PM" src="https://user-images.githubusercontent.com/10647082/181133167-a39efb4a-e744-4271-a8c9-61ae47e494f1.png">
<img width="762" alt="Screen Shot 2022-07-26 at 5 08 47 PM" src="https://user-images.githubusercontent.com/10647082/181133175-c805a6cb-bd2f-4885-9d72-14bea7863694.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/47870 | 2022-07-27T00:26:49Z | 2022-07-28T18:12:57Z | 2022-07-28T18:12:57Z | 2022-07-28T18:23:44Z |
DOC: Update link from VirtusLab to pandas-dev repo | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index a847624c55eba..166162a4763bf 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -591,12 +591,12 @@ Library Accessor Classes Description
Development tools
-----------------
-`pandas-stubs <https://github.com/VirtusLab/pandas-stubs>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`pandas-stubs <https://github.com/pandas-dev/pandas-stubs>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While pandas repository is partially typed, the package itself doesn't expose this information for external use.
Install pandas-stubs to enable basic type coverage of pandas API.
Learn more by reading through :issue:`14468`, :issue:`26766`, :issue:`28142`.
-See installation and usage instructions on the `github page <https://github.com/VirtusLab/pandas-stubs>`__.
+See installation and usage instructions on the `github page <https://github.com/pandas-dev/pandas-stubs>`__.
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
According to https://github.com/VirtusLab/pandas-stubs:
> As of July 2022 pandas_stubs package is no longer sourced from here but instead from a repository owned and maintained by the core pandas team: https://github.com/pandas-dev/pandas-stubs
This PR updates the links in the documentation to reflect this change. | https://api.github.com/repos/pandas-dev/pandas/pulls/47869 | 2022-07-27T00:02:52Z | 2022-07-27T16:53:23Z | 2022-07-27T16:53:23Z | 2022-07-27T16:53:29Z |
DOC: Adds Hamilton as a development tool | diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
index 1d77c596c1eb0..89d243555a358 100644
--- a/web/pandas/community/ecosystem.md
+++ b/web/pandas/community/ecosystem.md
@@ -400,3 +400,20 @@ Learn more by reading through these issues [14468](https://github.com/pandas-dev
[26766](https://github.com/pandas-dev/pandas/issues/26766), [28142](https://github.com/pandas-dev/pandas/issues/28142).
See installation and usage instructions on the [github page](https://github.com/VirtusLab/pandas-stubs).
+
+### [Hamilton](https://github.com/stitchfix/hamilton)
+
+Hamilton is a declarative dataflow framework that came out of Stitch Fix. It was designed to help one manage a
+Pandas code base, specifically with respect to feature engineering for machine learning models.
+
+It prescibes an opinionated paradigm, that ensures all code is:
+
+* unit testable
+* integration testing friendly
+* documentation friendly
+* transformation logic is reuseable, as it is decoupled from the context of where it is used.
+* integrateable with runtime data quality checks.
+
+This helps one to scale your pandas code base, at the same time, keeping maintenance costs low.
+
+For more information, see [documentation](https://hamilton-docs.gitbook.io/).
| Hamilton is a paradigm that helps one manage a pandas code base in an opinionated manner.
We have had great success with it at Stitch Fix, and it has a growing open-source
following. I think it helps one write pandas code in a way that gives
confidence when running pandas in production jobs.
- [x] N/A: closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47865 | 2022-07-26T18:42:05Z | 2022-07-27T16:55:39Z | 2022-07-27T16:55:39Z | 2022-07-29T20:24:39Z |
ENH: `Styler.relabel_index` for directly specifying display of index labels | diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst
index 77e1b0abae0c4..5144f12fa373a 100644
--- a/doc/source/reference/style.rst
+++ b/doc/source/reference/style.rst
@@ -41,6 +41,7 @@ Style application
Styler.applymap_index
Styler.format
Styler.format_index
+ Styler.relabel_index
Styler.hide
Styler.concat
Styler.set_td_classes
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 7f07187e34c78..d7117095d2321 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -44,6 +44,9 @@ e.g. totals and counts etc. (:issue:`43875`, :issue:`46186`)
Additionally there is an alternative output method :meth:`.Styler.to_string`,
which allows using the Styler's formatting methods to create, for example, CSVs (:issue:`44502`).
+A new feature :meth:`.Styler.relabel_index` is also made available to provide full customisation of the display of
+index or column headers (:issue:`47864`)
+
Minor feature improvements are:
- Adding the ability to render ``border`` and ``border-{side}`` CSS properties in Excel (:issue:`42276`)
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 65720e675a77a..d0a1cb6781e9f 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -22,7 +22,10 @@
from pandas._config import get_option
from pandas._libs import lib
-from pandas._typing import Level
+from pandas._typing import (
+ Axis,
+ Level,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import (
@@ -1345,6 +1348,164 @@ def format_index(
return self
+ def relabel_index(
+ self,
+ labels: Sequence | Index,
+ axis: Axis = 0,
+ level: Level | list[Level] | None = None,
+ ) -> StylerRenderer:
+ r"""
+ Relabel the index, or column header, keys to display a set of specified values.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ labels : list-like or Index
+ New labels to display. Must have same length as the underlying values not
+ hidden.
+ axis : {"index", 0, "columns", 1}
+ Apply to the index or columns.
+ level : int, str, list, optional
+ The level(s) over which to apply the new labels. If `None` will apply
+ to all levels of an Index or MultiIndex which are not hidden.
+
+ Returns
+ -------
+ self : Styler
+
+ See Also
+ --------
+ Styler.format_index: Format the text display value of index or column headers.
+ Styler.hide: Hide the index, column headers, or specified data from display.
+
+ Notes
+ -----
+ As part of Styler, this method allows the display of an index to be
+ completely user-specified without affecting the underlying DataFrame data,
+ index, or column headers. This means that the flexibility of indexing is
+ maintained whilst the final display is customisable.
+
+ Since Styler is designed to be progressively constructed with method chaining,
+ this method is adapted to react to the **currently specified hidden elements**.
+ This is useful because it means one does not have to specify all the new
+ labels if the majority of an index, or column headers, have already been hidden.
+ The following produce equivalent display (note the length of ``labels`` in
+ each case).
+
+ .. code-block:: python
+
+ # relabel first, then hide
+ df = pd.DataFrame({"col": ["a", "b", "c"]})
+ df.style.relabel_index(["A", "B", "C"]).hide([0,1])
+ # hide first, then relabel
+ df = pd.DataFrame({"col": ["a", "b", "c"]})
+ df.style.hide([0,1]).relabel_index(["C"])
+
+ This method should be used, rather than :meth:`Styler.format_index`, in one of
+ the following cases (see examples):
+
+ - A specified set of labels are required which are not a function of the
+ underlying index keys.
+ - The function of the underlying index keys requires a counter variable,
+ such as those available upon enumeration.
+
+ Examples
+ --------
+ Basic use
+
+ >>> df = pd.DataFrame({"col": ["a", "b", "c"]})
+ >>> df.style.relabel_index(["A", "B", "C"]) # doctest: +SKIP
+ col
+ A a
+ B b
+ C c
+
+ Chaining with pre-hidden elements
+
+ >>> df.style.hide([0,1]).relabel_index(["C"]) # doctest: +SKIP
+ col
+ C c
+
+ Using a MultiIndex
+
+ >>> midx = pd.MultiIndex.from_product([[0, 1], [0, 1], [0, 1]])
+ >>> df = pd.DataFrame({"col": list(range(8))}, index=midx)
+ >>> styler = df.style # doctest: +SKIP
+ col
+ 0 0 0 0
+ 1 1
+ 1 0 2
+ 1 3
+ 1 0 0 4
+ 1 5
+ 1 0 6
+ 1 7
+ >>> styler.hide((midx.get_level_values(0)==0)|(midx.get_level_values(1)==0))
+ ... # doctest: +SKIP
+ >>> styler.hide(level=[0,1]) # doctest: +SKIP
+ >>> styler.relabel_index(["binary6", "binary7"]) # doctest: +SKIP
+ col
+ binary6 6
+ binary7 7
+
+ We can also achieve the above by indexing first and then re-labeling
+
+ >>> styler = df.loc[[(1,1,0), (1,1,1)]].style
+ >>> styler.hide(level=[0,1]).relabel_index(["binary6", "binary7"])
+ ... # doctest: +SKIP
+ col
+ binary6 6
+ binary7 7
+
+ Defining a formatting function which uses an enumeration counter. Also note
+ that the value of the index key is passed in the case of string labels so it
+ can also be inserted into the label, using curly brackets (or double curly
+ brackets if the string if pre-formatted),
+
+ >>> df = pd.DataFrame({"samples": np.random.rand(10)})
+ >>> styler = df.loc[np.random.randint(0,10,3)].style
+ >>> styler.relabel_index([f"sample{i+1} ({{}})" for i in range(3)])
+ ... # doctest: +SKIP
+ samples
+ sample1 (5) 0.315811
+ sample2 (0) 0.495941
+ sample3 (2) 0.067946
+ """
+ axis = self.data._get_axis_number(axis)
+ if axis == 0:
+ display_funcs_, obj = self._display_funcs_index, self.index
+ hidden_labels, hidden_lvls = self.hidden_rows, self.hide_index_
+ else:
+ display_funcs_, obj = self._display_funcs_columns, self.columns
+ hidden_labels, hidden_lvls = self.hidden_columns, self.hide_columns_
+ visible_len = len(obj) - len(set(hidden_labels))
+ if len(labels) != visible_len:
+ raise ValueError(
+ "``labels`` must be of length equal to the number of "
+ f"visible labels along ``axis`` ({visible_len})."
+ )
+
+ if level is None:
+ level = [i for i in range(obj.nlevels) if not hidden_lvls[i]]
+ levels_ = refactor_levels(level, obj)
+
+ def alias_(x, value):
+ if isinstance(value, str):
+ return value.format(x)
+ return value
+
+ for ai, i in enumerate([i for i in range(len(obj)) if i not in hidden_labels]):
+ if len(levels_) == 1:
+ idx = (i, levels_[0]) if axis == 0 else (levels_[0], i)
+ display_funcs_[idx] = partial(alias_, value=labels[ai])
+ else:
+ for aj, lvl in enumerate(levels_):
+ idx = (i, lvl) if axis == 0 else (lvl, i)
+ display_funcs_[idx] = partial(alias_, value=labels[ai][aj])
+
+ return self
+
def _element(
html_element: str,
diff --git a/pandas/tests/io/formats/style/test_format.py b/pandas/tests/io/formats/style/test_format.py
index a52c679e16ad5..0b114ea128b0b 100644
--- a/pandas/tests/io/formats/style/test_format.py
+++ b/pandas/tests/io/formats/style/test_format.py
@@ -30,6 +30,20 @@ def styler(df):
return Styler(df, uuid_len=0)
+@pytest.fixture
+def df_multi():
+ return DataFrame(
+ data=np.arange(16).reshape(4, 4),
+ columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
+ index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]),
+ )
+
+
+@pytest.fixture
+def styler_multi(df_multi):
+ return Styler(df_multi, uuid_len=0)
+
+
def test_display_format(styler):
ctx = styler.format("{:0.1f}")._translate(True, True)
assert all(["display_value" in c for c in row] for row in ctx["body"])
@@ -442,3 +456,46 @@ def test_boolean_format():
ctx = df.style._translate(True, True)
assert ctx["body"][0][1]["display_value"] is True
assert ctx["body"][0][2]["display_value"] is False
+
+
+@pytest.mark.parametrize(
+ "hide, labels",
+ [
+ (False, [1, 2]),
+ (True, [1, 2, 3, 4]),
+ ],
+)
+def test_relabel_raise_length(styler_multi, hide, labels):
+ if hide:
+ styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")])
+ with pytest.raises(ValueError, match="``labels`` must be of length equal"):
+ styler_multi.relabel_index(labels=labels)
+
+
+def test_relabel_index(styler_multi):
+ labels = [(1, 2), (3, 4)]
+ styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")])
+ styler_multi.relabel_index(labels=labels)
+ ctx = styler_multi._translate(True, True)
+ assert {"value": "X", "display_value": 1}.items() <= ctx["body"][0][0].items()
+ assert {"value": "y", "display_value": 2}.items() <= ctx["body"][0][1].items()
+ assert {"value": "Y", "display_value": 3}.items() <= ctx["body"][1][0].items()
+ assert {"value": "x", "display_value": 4}.items() <= ctx["body"][1][1].items()
+
+
+def test_relabel_columns(styler_multi):
+ labels = [(1, 2), (3, 4)]
+ styler_multi.hide(axis=1, subset=[("A", "a"), ("B", "b")])
+ styler_multi.relabel_index(axis=1, labels=labels)
+ ctx = styler_multi._translate(True, True)
+ assert {"value": "A", "display_value": 1}.items() <= ctx["head"][0][3].items()
+ assert {"value": "B", "display_value": 3}.items() <= ctx["head"][0][4].items()
+ assert {"value": "b", "display_value": 2}.items() <= ctx["head"][1][3].items()
+ assert {"value": "a", "display_value": 4}.items() <= ctx["head"][1][4].items()
+
+
+def test_relabel_roundtrip(styler):
+ styler.relabel_index(["{}", "{}"])
+ ctx = styler._translate(True, True)
+ assert {"value": "x", "display_value": "x"}.items() <= ctx["body"][0][0].items()
+ assert {"value": "y", "display_value": "y"}.items() <= ctx["body"][1][0].items()
|
<img width="645" alt="Screenshot 2022-07-26 at 18 10 00" src="https://user-images.githubusercontent.com/24256554/181057085-676c1987-25f7-44b4-9f63-5f7479354e5e.png">
<img width="627" alt="Screenshot 2022-07-26 at 18 10 13" src="https://user-images.githubusercontent.com/24256554/181057115-a143111d-b51d-472b-93f7-5113f74a77d5.png">
<img width="616" alt="Screenshot 2022-07-26 at 18 10 23" src="https://user-images.githubusercontent.com/24256554/181057131-d531de9e-947d-4d66-9060-105de858c498.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/47864 | 2022-07-26T16:13:38Z | 2022-08-02T20:39:25Z | 2022-08-02T20:39:24Z | 2022-08-02T20:45:49Z |
Added improvements in to_datetime Error reporting message - incorrect field today/now shown in error | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index b3e51191e8efa..fe4078f611f7e 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -799,6 +799,7 @@ cdef _array_to_datetime_object(
# We return an object array and only attempt to parse:
# 1) NaT or NaT-like values
# 2) datetime strings, which we return as datetime.datetime
+ # 3) special strings - "now" & "today"
for i in range(n):
val = values[i]
if checknull_with_nat_and_na(val) or PyDateTime_Check(val):
@@ -817,7 +818,8 @@ cdef _array_to_datetime_object(
yearfirst=yearfirst)
pydatetime_to_dt64(oresult[i], &dts)
check_dts_bounds(&dts)
- except (ValueError, OverflowError):
+ except (ValueError, OverflowError) as ex:
+ ex.args = (f"{ex} present at position {i}", )
if is_coerce:
oresult[i] = <object>NaT
continue
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 97a8f81094a8f..1d5bbf87090eb 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -298,6 +298,14 @@ def parse_datetime_string(
if dt is not None:
return dt
+ # Handling special case strings today & now
+ if date_string == "now":
+ dt = datetime.now()
+ return dt
+ elif date_string == "today":
+ dt = datetime.today()
+ return dt
+
try:
dt, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq=None)
return dt
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index b128838318ac0..f3824d0bf3ea2 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -2591,6 +2591,13 @@ def test_invalid_origins_tzinfo(self):
with pytest.raises(ValueError, match="must be tz-naive"):
to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
+ def test_incorrect_value_exception(self):
+ # GH47495
+ with pytest.raises(
+ ValueError, match="Unknown string format: yesterday present at position 1"
+ ):
+ to_datetime(["today", "yesterday"])
+
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
| - closes #47495
Splitting changes from #47597, this PR aims to close bug #47495.
- Changes description
  - For issue #47495 - made a change in `pandas/_libs/tslib.pyx` to accept "now" and "today" values. These values are accepted in other areas of parsing (`_parse_today_now` in `pandas/_libs/tslib.pyx`)
- Example of changed error messages
| Code | Old Error ( on 1.4.3 ) | New Error |
| ------------- | ------------- | ------------- |
| pd.to_datetime(["today", "yesterday"]) | dateutil.parser._parser.ParserError: Unknown string format: today | dateutil.parser._parser.ParserError: Unknown string format: yesterday present at position 1 |
| https://api.github.com/repos/pandas-dev/pandas/pulls/47860 | 2022-07-26T09:59:40Z | 2022-08-15T17:07:10Z | 2022-08-15T17:07:10Z | 2022-08-15T17:24:50Z |
DOC: correct links of Plotly | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 256c3ee36e80c..a847624c55eba 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -161,10 +161,10 @@ A good implementation for Python users is `has2k1/plotnine <https://github.com/h
`IPython Vega <https://github.com/vega/ipyvega>`__ leverages `Vega
<https://github.com/vega/vega>`__ to create plots within Jupyter Notebook.
-`Plotly <https://poltly.com/python>`__
+`Plotly <https://plotly.com/python>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-`Plotly’s <https://poltly.com/>`__ `Python API <https://poltly.com/python/>`__ enables interactive figures and web shareability. Maps, 2D, 3D, and live-streaming graphs are rendered with WebGL and `D3.js <https://d3js.org/>`__. The library supports plotting directly from a pandas DataFrame and cloud-based collaboration. Users of `matplotlib, ggplot for Python, and Seaborn <https://poltly.com/python/matplotlib-to-plotly-tutorial/>`__ can convert figures into interactive web-based plots. Plots can be drawn in `IPython Notebooks <https://plotly.com/ipython-notebooks/>`__ , edited with R or MATLAB, modified in a GUI, or embedded in apps and dashboards. Plotly is free for unlimited sharing, and has `offline <https://poltly.com/python/offline/>`__, or `on-premise <https://poltly.com/product/enterprise/>`__ accounts for private use.
+`Plotly’s <https://plotly.com/>`__ `Python API <https://plotly.com/python/>`__ enables interactive figures and web shareability. Maps, 2D, 3D, and live-streaming graphs are rendered with WebGL and `D3.js <https://d3js.org/>`__. The library supports plotting directly from a pandas DataFrame and cloud-based collaboration. Users of `matplotlib, ggplot for Python, and Seaborn <https://plotly.com/python/matplotlib-to-plotly-tutorial/>`__ can convert figures into interactive web-based plots. Plots can be drawn in `IPython Notebooks <https://plotly.com/ipython-notebooks/>`__ , edited with R or MATLAB, modified in a GUI, or embedded in apps and dashboards. Plotly is free for unlimited sharing, and has `offline <https://plotly.com/python/offline/>`__, or `on-premise <https://plotly.com/product/enterprise/>`__ accounts for private use.
`Lux <https://github.com/lux-org/lux>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| https://poltly.com/ -> https://plotly.com/
- [x] closes #47857
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47858 | 2022-07-26T07:41:16Z | 2022-07-26T18:06:07Z | 2022-07-26T18:06:07Z | 2022-07-27T01:11:57Z |
DOC/ENH: Add documentation and whatsnew entry for ArrowDtype and ArrowExtensionArray | diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index 1b8e0fdb856b5..6d09e10f284af 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -19,19 +19,20 @@ objects contained with a :class:`Index`, :class:`Series`, or
For some data types, pandas extends NumPy's type system. String aliases for these types
can be found at :ref:`basics.dtypes`.
-=================== ========================= ================== =============================
-Kind of Data pandas Data Type Scalar Array
-=================== ========================= ================== =============================
-TZ-aware datetime :class:`DatetimeTZDtype` :class:`Timestamp` :ref:`api.arrays.datetime`
-Timedeltas (none) :class:`Timedelta` :ref:`api.arrays.timedelta`
-Period (time spans) :class:`PeriodDtype` :class:`Period` :ref:`api.arrays.period`
-Intervals :class:`IntervalDtype` :class:`Interval` :ref:`api.arrays.interval`
-Nullable Integer :class:`Int64Dtype`, ... (none) :ref:`api.arrays.integer_na`
-Categorical :class:`CategoricalDtype` (none) :ref:`api.arrays.categorical`
-Sparse :class:`SparseDtype` (none) :ref:`api.arrays.sparse`
-Strings :class:`StringDtype` :class:`str` :ref:`api.arrays.string`
-Boolean (with NA) :class:`BooleanDtype` :class:`bool` :ref:`api.arrays.bool`
-=================== ========================= ================== =============================
+=================== ========================= ============================= =============================
+Kind of Data pandas Data Type Scalar Array
+=================== ========================= ============================= =============================
+TZ-aware datetime :class:`DatetimeTZDtype` :class:`Timestamp` :ref:`api.arrays.datetime`
+Timedeltas (none) :class:`Timedelta` :ref:`api.arrays.timedelta`
+Period (time spans) :class:`PeriodDtype` :class:`Period` :ref:`api.arrays.period`
+Intervals :class:`IntervalDtype` :class:`Interval` :ref:`api.arrays.interval`
+Nullable Integer :class:`Int64Dtype`, ... (none) :ref:`api.arrays.integer_na`
+Categorical :class:`CategoricalDtype` (none) :ref:`api.arrays.categorical`
+Sparse :class:`SparseDtype` (none) :ref:`api.arrays.sparse`
+Strings :class:`StringDtype` :class:`str` :ref:`api.arrays.string`
+Boolean (with NA) :class:`BooleanDtype` :class:`bool` :ref:`api.arrays.bool`
+PyArrow :class:`ArrowDtype` Python Scalars or :class:`NA` :ref:`api.arrays.arrow`
+=================== ========================= ============================= =============================
pandas and third-party libraries can extend NumPy's type system (see :ref:`extending.extension-types`).
The top-level :meth:`array` method can be used to create a new array, which may be
@@ -42,6 +43,44 @@ stored in a :class:`Series`, :class:`Index`, or as a column in a :class:`DataFra
array
+.. _api.arrays.arrow:
+
+PyArrow
+-------
+
+.. warning::
+
+ This feature is experimental, and the API can change in a future release without warning.
+
+The :class:`arrays.ArrowExtensionArray` is backed by a :external+pyarrow:py:class:`pyarrow.ChunkedArray` with a
+:external+pyarrow:py:class:`pyarrow.DataType` instead of a NumPy array and data type. The ``.dtype`` of a :class:`arrays.ArrowExtensionArray`
+is an :class:`ArrowDtype`.
+
+`Pyarrow <https://arrow.apache.org/docs/python/index.html>`__ provides similar array and `data type <https://arrow.apache.org/docs/python/api/datatypes.html>`__
+support as NumPy including first-class nullability support for all data types, immutability and more.
+
+.. note::
+
+ For string types (``pyarrow.string()``, ``string[pyarrow]``), PyArrow support is still facilitated
+ by :class:`arrays.ArrowStringArray` and ``StringDtype("pyarrow")``. See the :ref:`string section <api.arrays.string>`
+ below.
+
+While individual values in an :class:`arrays.ArrowExtensionArray` are stored as a PyArrow objects, scalars are **returned**
+as Python scalars corresponding to the data type, e.g. a PyArrow int64 will be returned as Python int, or :class:`NA` for missing
+values.
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
+ arrays.ArrowExtensionArray
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
+ ArrowDtype
+
.. _api.arrays.datetime:
Datetimes
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index bcbe2c6d8b104..f0eb73edda332 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -24,6 +24,40 @@ https://github.com/pandas-dev/pandas-stubs for more information.
We thank VirtusLab and Microsoft for their initial, significant contributions to ``pandas-stubs``
+.. _whatsnew_150.enhancements.arrow:
+
+Native PyArrow-backed ExtensionArray
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+With `Pyarrow <https://arrow.apache.org/docs/python/index.html>`__ installed, users can now create pandas objects
+that are backed by a ``pyarrow.ChunkedArray`` and ``pyarrow.DataType``.
+
+The ``dtype`` argument can accept a string of a `pyarrow data type <https://arrow.apache.org/docs/python/api/datatypes.html>`__
+with ``pyarrow`` in brackets e.g. ``"int64[pyarrow]"`` or, for pyarrow data types that take parameters, a :class:`ArrowDtype`
+initialized with a ``pyarrow.DataType``.
+
+.. ipython:: python
+
+ import pyarrow as pa
+ ser_float = pd.Series([1.0, 2.0, None], dtype="float32[pyarrow]")
+ ser_float
+
+ list_of_int_type = pd.ArrowDtype(pa.list_(pa.int64()))
+ ser_list = pd.Series([[1, 2], [3, None]], dtype=list_of_int_type)
+ ser_list
+
+ ser_list.take([1, 0])
+ ser_float * 5
+ ser_float.mean()
+ ser_float.dropna()
+
+Most operations are supported and have been implemented using `pyarrow compute <https://arrow.apache.org/docs/python/api/compute.html>`__ functions.
+We recommend installing the latest version of PyArrow to access the most recently implemented compute functions.
+
+.. warning::
+
+ This feature is experimental, and the API can change in a future release without warning.
+
.. _whatsnew_150.enhancements.dataframe_interchange:
DataFrame interchange protocol implementation
diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py
index 89d362eb77e68..3a8e80a6b5d2b 100644
--- a/pandas/arrays/__init__.py
+++ b/pandas/arrays/__init__.py
@@ -4,6 +4,7 @@
See :ref:`extending.extension-types` for more.
"""
from pandas.core.arrays import (
+ ArrowExtensionArray,
ArrowStringArray,
BooleanArray,
Categorical,
@@ -19,6 +20,7 @@
)
__all__ = [
+ "ArrowExtensionArray",
"ArrowStringArray",
"BooleanArray",
"Categorical",
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 8d80cb18d21db..1f7939011a1f1 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -159,8 +159,47 @@ def to_pyarrow_type(
class ArrowExtensionArray(OpsMixin, ExtensionArray):
"""
- Base class for ExtensionArray backed by Arrow ChunkedArray.
- """
+ Pandas ExtensionArray backed by a PyArrow ChunkedArray.
+
+ .. warning::
+
+ ArrowExtensionArray is considered experimental. The implementation and
+ parts of the API may change without warning.
+
+ Parameters
+ ----------
+ values : pyarrow.Array or pyarrow.ChunkedArray
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
+
+ Returns
+ -------
+ ArrowExtensionArray
+
+ Notes
+ -----
+ Most methods are implemented using `pyarrow compute functions. <https://arrow.apache.org/docs/python/api/compute.html>`__
+ Some methods may either raise an exception or raise a ``PerformanceWarning`` if an
+ associated compute function is not available based on the installed version of PyArrow.
+
+ Please install the latest version of PyArrow to enable the best functionality and avoid
+ potential bugs in prior versions of PyArrow.
+
+ Examples
+ --------
+ Create an ArrowExtensionArray with :func:`pandas.array`:
+
+ >>> pd.array([1, 1, None], dtype="int64[pyarrow]")
+ <ArrowExtensionArray>
+ [1, 1, <NA>]
+ Length: 3, dtype: int64[pyarrow]
+ """ # noqa: E501 (http link too long)
_data: pa.ChunkedArray
_dtype: ArrowDtype
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index 523e031c220e4..48e2c5bdda2f8 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -20,9 +20,47 @@
@register_extension_dtype
class ArrowDtype(StorageExtensionDtype):
"""
- Base class for dtypes for ArrowExtensionArray.
- Modeled after BaseMaskedDtype
- """
+ An ExtensionDtype for PyArrow data types.
+
+ .. warning::
+
+ ArrowDtype is considered experimental. The implementation and
+ parts of the API may change without warning.
+
+ While most ``dtype`` arguments can accept the "string"
+ constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
+ if the data type contains parameters like ``pyarrow.timestamp``.
+
+ Parameters
+ ----------
+ pyarrow_dtype : pa.DataType
+ An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.
+
+ Attributes
+ ----------
+ pyarrow_dtype
+
+ Methods
+ -------
+ None
+
+ Returns
+ -------
+ ArrowDtype
+
+ Examples
+ --------
+ >>> import pyarrow as pa
+ >>> pd.ArrowDtype(pa.int64())
+ int64[pyarrow]
+
+ Types with parameters must be constructed with ArrowDtype.
+
+ >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
+ timestamp[s, tz=America/New_York][pyarrow]
+ >>> pd.ArrowDtype(pa.list_(pa.int64()))
+ list<item: int64>[pyarrow]
+ """ # noqa: E501
_metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment]
@@ -37,6 +75,9 @@ def __init__(self, pyarrow_dtype: pa.DataType) -> None:
)
self.pyarrow_dtype = pyarrow_dtype
+ def __repr__(self) -> str:
+ return self.name
+
@property
def type(self):
"""
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 9aca47dbddbf2..e7233484e16b6 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -150,6 +150,7 @@
"LZMA",
"Numba",
"Timestamp",
+ "PyArrow",
}
CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS}
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
* Depends on https://github.com/pandas-dev/pandas/pull/47818
* Should I add a dedicated user-guide page for this functionality? | https://api.github.com/repos/pandas-dev/pandas/pulls/47854 | 2022-07-26T01:16:18Z | 2022-08-18T20:13:58Z | 2022-08-18T20:13:58Z | 2022-09-08T18:41:46Z |
CLN/DOC: Remove sphinx referencing the wiki | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2a6ec8947c8d7..81ff14d33758a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -453,7 +453,6 @@
# extlinks alias
extlinks = {
"issue": ("https://github.com/pandas-dev/pandas/issues/%s", "GH"),
- "wiki": ("https://github.com/pandas-dev/pandas/wiki/%s", "wiki "),
}
diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md
index 35a6b3361f32e..3c6c4d4fdf9a2 100644
--- a/web/pandas/about/roadmap.md
+++ b/web/pandas/about/roadmap.md
@@ -103,9 +103,52 @@ uses label-based, rather than position-based, indexing. We propose that
it should only work with positional indexing, and the translation of
keys to positions should be entirely done at a higher level.
-Indexing is a complicated API with many subtleties. This refactor will
-require care and attention. More details are discussed at
-<https://github.com/pandas-dev/pandas/wiki/(Tentative)-rules-for-restructuring-indexing-code>
+Indexing is a complicated API with many subtleties. This refactor will require care
+and attention. The following principles should inspire refactoring of indexing code and
+should result on cleaner, simpler, and more performant code.
+
+1. Label indexing must never involve looking in an axis twice for the same label(s).
+This implies that any validation step must either:
+
+ * limit validation to general features (e.g. dtype/structure of the key/index), or
+ * reuse the result for the actual indexing.
+
+2. Indexers must never rely on an explicit call to other indexers.
+For instance, it is OK to have some internal method of `.loc` call some
+internal method of `__getitem__` (or of their common base class),
+but never in the code flow of `.loc` should `the_obj[something]` appear.
+
+3. Execution of positional indexing must never involve labels (as currently, sadly, happens).
+That is, the code flow of a getter call (or a setter call in which the right hand side is non-indexed)
+to `.iloc` should never involve the axes of the object in any way.
+
+4. Indexing must never involve accessing/modifying values (i.e., act on `._data` or `.values`) more than once.
+The following steps must hence be clearly decoupled:
+
+ * find positions we need to access/modify on each axis
+ * (if we are accessing) derive the type of object we need to return (dimensionality)
+ * actually access/modify the values
+ * (if we are accessing) construct the return object
+
+5. As a corollary to the decoupling between 4.i and 4.iii, any code which deals on how data is stored
+(including any combination of handling multiple dtypes, and sparse storage, categoricals, third-party types)
+must be independent from code that deals with identifying affected rows/columns,
+and take place only once step 4.i is completed.
+
+ * In particular, such code should most probably not live in `pandas/core/indexing.py`
+ * ... and must not depend in any way on the type(s) of axes (e.g. no `MultiIndex` special cases)
+
+6. As a corollary to point 1.i, `Index` (sub)classes must provide separate methods for any desired validity check of label(s) which does not involve actual lookup,
+on the one side, and for any required conversion/adaptation/lookup of label(s), on the other.
+
+7. Use of trial and error should be limited, and anyway restricted to catch only exceptions
+which are actually expected (typically `KeyError`).
+
+ * In particular, code should never (intentionally) raise new exceptions in the `except` portion of a `try... exception`
+
+8. Any code portion which is not specific to setters and getters must be shared,
+and when small differences in behavior are expected (e.g. getting with `.loc` raises for
+missing labels, setting still doesn't), they can be managed with a specific parameter.
## Numba-accelerated operations
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Since the wiki is disabled, this reference shouldn't be made in the docs
| https://api.github.com/repos/pandas-dev/pandas/pulls/47853 | 2022-07-25T21:56:05Z | 2022-07-28T17:33:54Z | 2022-07-28T17:33:54Z | 2022-07-28T17:47:48Z |
DEPR: args and kwargs in rolling, expanding, and ewm ops | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 7f07187e34c78..8ba17097adbbd 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -773,6 +773,7 @@ Other Deprecations
- Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
- Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
- Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`)
+- Deprecated arguments ``*args`` and ``**kwargs`` in :class:`Rolling`, :class:`Expanding`, and :class:`ExponentialMovingWindow` ops. (:issue:`47836`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.performance:
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index ed2a4002f5ce7..e31b5c60a37ee 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -3,9 +3,12 @@
from collections import defaultdict
from typing import cast
+import warnings
import numpy as np
+from pandas.util._exceptions import find_stack_level
+
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
@@ -167,3 +170,38 @@ def prep_binary(arg1, arg2):
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
+
+
+def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None:
+ """
+ Warn for deprecation of args and kwargs in rolling/expanding functions.
+
+ Parameters
+ ----------
+ cls : type
+ Class to warn about.
+ kernel : str
+ Operation name.
+ args : tuple or None
+ args passed by user. Will be None if and only if kernel does not have args.
+ kwargs : dict or None
+ kwargs passed by user. Will be None if and only if kernel does not have kwargs.
+ """
+ warn_args = args is not None and len(args) > 0
+ warn_kwargs = kwargs is not None and len(kwargs) > 0
+ if warn_args and warn_kwargs:
+ msg = "args and kwargs"
+ elif warn_args:
+ msg = "args"
+ elif warn_kwargs:
+ msg = "kwargs"
+ else:
+ msg = ""
+ if msg != "":
+ warnings.warn(
+ f"Passing additional {msg} to {cls.__name__}.{kernel} has "
+ "no impact on the result and is deprecated. This will "
+ "raise a TypeError in a future version of pandas.",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py
index 4fe08e2fa20b3..835085d41cffa 100644
--- a/pandas/core/window/doc.py
+++ b/pandas/core/window/doc.py
@@ -43,14 +43,18 @@ def create_section_header(header: str) -> str:
args_compat = dedent(
"""
*args
- For NumPy compatibility and will not have an effect on the result.\n
+ For NumPy compatibility and will not have an effect on the result.
+
+ .. deprecated:: 1.5.0\n
"""
).replace("\n", "", 1)
kwargs_compat = dedent(
"""
**kwargs
- For NumPy compatibility and will not have an effect on the result.\n
+ For NumPy compatibility and will not have an effect on the result.
+
+ .. deprecated:: 1.5.0\n
"""
).replace("\n", "", 1)
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 3a42a4b1a1663..020ca71050015 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -42,7 +42,10 @@
get_jit_arguments,
maybe_use_numba,
)
-from pandas.core.window.common import zsqrt
+from pandas.core.window.common import (
+ maybe_warn_args_and_kwargs,
+ zsqrt,
+)
from pandas.core.window.doc import (
_shared_docs,
args_compat,
@@ -546,6 +549,7 @@ def mean(
engine_kwargs=None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs)
if maybe_use_numba(engine):
if self.method == "single":
func = generate_numba_ewm_func
@@ -603,6 +607,7 @@ def sum(
engine_kwargs=None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
if not self.adjust:
raise NotImplementedError("sum is not implemented with adjust=False")
if maybe_use_numba(engine):
@@ -658,6 +663,7 @@ def sum(
agg_method="std",
)
def std(self, bias: bool = False, numeric_only: bool = False, *args, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "std", args, kwargs)
nv.validate_window_func("std", args, kwargs)
if (
numeric_only
@@ -702,6 +708,7 @@ def vol(self, bias: bool = False, *args, **kwargs):
agg_method="var",
)
def var(self, bias: bool = False, numeric_only: bool = False, *args, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "var", args, kwargs)
nv.validate_window_func("var", args, kwargs)
window_func = window_aggregations.ewmcov
wfunc = partial(
@@ -756,6 +763,7 @@ def cov(
):
from pandas import Series
+ maybe_warn_args_and_kwargs(type(self), "cov", None, kwargs)
self._validate_numeric_only("cov", numeric_only)
def cov_func(x, y):
@@ -829,6 +837,7 @@ def corr(
):
from pandas import Series
+ maybe_warn_args_and_kwargs(type(self), "corr", None, kwargs)
self._validate_numeric_only("corr", numeric_only)
def cov_func(x, y):
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index dcdcbc0483d59..e997ffe1ec132 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -25,6 +25,7 @@
ExpandingIndexer,
GroupbyIndexer,
)
+from pandas.core.window.common import maybe_warn_args_and_kwargs
from pandas.core.window.doc import (
_shared_docs,
args_compat,
@@ -252,6 +253,7 @@ def sum(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(
numeric_only=numeric_only,
@@ -285,6 +287,7 @@ def max(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "max", args, kwargs)
nv.validate_expanding_func("max", args, kwargs)
return super().max(
numeric_only=numeric_only,
@@ -318,6 +321,7 @@ def min(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "min", args, kwargs)
nv.validate_expanding_func("min", args, kwargs)
return super().min(
numeric_only=numeric_only,
@@ -351,6 +355,7 @@ def mean(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs)
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(
numeric_only=numeric_only,
@@ -382,6 +387,7 @@ def median(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "median", None, kwargs)
return super().median(
numeric_only=numeric_only,
engine=engine,
@@ -446,6 +452,7 @@ def std(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "std", args, kwargs)
nv.validate_expanding_func("std", args, kwargs)
return super().std(
ddof=ddof,
@@ -512,6 +519,7 @@ def var(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "var", args, kwargs)
nv.validate_expanding_func("var", args, kwargs)
return super().var(
ddof=ddof,
@@ -557,8 +565,9 @@ def var(
aggregation_description="standard error of mean",
agg_method="sem",
)
- def sem(self, ddof: int = 1, *args, **kwargs):
- return super().sem(ddof=ddof, **kwargs)
+ def sem(self, ddof: int = 1, numeric_only: bool = False, *args, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "sem", args, kwargs)
+ return super().sem(ddof=ddof, numeric_only=numeric_only, **kwargs)
@doc(
template_header,
@@ -577,6 +586,7 @@ def sem(self, ddof: int = 1, *args, **kwargs):
agg_method="skew",
)
def skew(self, numeric_only: bool = False, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "skew", None, kwargs)
return super().skew(numeric_only=numeric_only, **kwargs)
@doc(
@@ -618,6 +628,7 @@ def skew(self, numeric_only: bool = False, **kwargs):
agg_method="kurt",
)
def kurt(self, numeric_only: bool = False, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "kurt", None, kwargs)
return super().kurt(numeric_only=numeric_only, **kwargs)
@doc(
@@ -656,6 +667,7 @@ def quantile(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "quantile", None, kwargs)
return super().quantile(
quantile=quantile,
interpolation=interpolation,
@@ -733,6 +745,7 @@ def rank(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "rank", None, kwargs)
return super().rank(
method=method,
ascending=ascending,
@@ -779,6 +792,7 @@ def cov(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "cov", None, kwargs)
return super().cov(
other=other,
pairwise=pairwise,
@@ -852,6 +866,7 @@ def corr(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "corr", None, kwargs)
return super().corr(
other=other,
pairwise=pairwise,
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 93f07c5d75625..84915e2f52f17 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -79,6 +79,7 @@
)
from pandas.core.window.common import (
flex_binary_moment,
+ maybe_warn_args_and_kwargs,
zsqrt,
)
from pandas.core.window.doc import (
@@ -2080,6 +2081,7 @@ def sum(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(
numeric_only=numeric_only,
@@ -2113,6 +2115,7 @@ def max(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "max", args, kwargs)
nv.validate_rolling_func("max", args, kwargs)
return super().max(
numeric_only=numeric_only,
@@ -2161,6 +2164,7 @@ def min(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "min", args, kwargs)
nv.validate_rolling_func("min", args, kwargs)
return super().min(
numeric_only=numeric_only,
@@ -2216,6 +2220,7 @@ def mean(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs)
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(
numeric_only=numeric_only,
@@ -2262,6 +2267,7 @@ def median(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "median", None, kwargs)
return super().median(
numeric_only=numeric_only,
engine=engine,
@@ -2325,6 +2331,7 @@ def std(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "std", args, kwargs)
nv.validate_rolling_func("std", args, kwargs)
return super().std(
ddof=ddof,
@@ -2390,6 +2397,7 @@ def var(
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "var", args, kwargs)
nv.validate_rolling_func("var", args, kwargs)
return super().var(
ddof=ddof,
@@ -2416,6 +2424,7 @@ def var(
agg_method="skew",
)
def skew(self, numeric_only: bool = False, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "skew", None, kwargs)
return super().skew(numeric_only=numeric_only, **kwargs)
@doc(
@@ -2454,6 +2463,7 @@ def skew(self, numeric_only: bool = False, **kwargs):
agg_method="sem",
)
def sem(self, ddof: int = 1, numeric_only: bool = False, *args, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "sem", args, kwargs)
nv.validate_rolling_func("sem", args, kwargs)
# Raise here so error message says sem instead of std
self._validate_numeric_only("sem", numeric_only)
@@ -2500,6 +2510,7 @@ def sem(self, ddof: int = 1, numeric_only: bool = False, *args, **kwargs):
agg_method="kurt",
)
def kurt(self, numeric_only: bool = False, **kwargs):
+ maybe_warn_args_and_kwargs(type(self), "kurt", None, kwargs)
return super().kurt(numeric_only=numeric_only, **kwargs)
@doc(
@@ -2557,6 +2568,7 @@ def quantile(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "quantile", None, kwargs)
return super().quantile(
quantile=quantile,
interpolation=interpolation,
@@ -2634,6 +2646,7 @@ def rank(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "rank", None, kwargs)
return super().rank(
method=method,
ascending=ascending,
@@ -2680,6 +2693,7 @@ def cov(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "cov", None, kwargs)
return super().cov(
other=other,
pairwise=pairwise,
@@ -2813,6 +2827,7 @@ def corr(
numeric_only: bool = False,
**kwargs,
):
+ maybe_warn_args_and_kwargs(type(self), "corr", None, kwargs)
return super().corr(
other=other,
pairwise=pairwise,
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 2a8caa1d42d4d..6495f7411938c 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -1,7 +1,10 @@
import numpy as np
import pytest
-from pandas.errors import SpecificationError
+from pandas.errors import (
+ SpecificationError,
+ UnsupportedFunctionCall,
+)
from pandas import (
DataFrame,
@@ -409,3 +412,78 @@ def test_rolling_max_min_periods(step):
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3]).rolling(window=3, min_periods=5, step=step).max()
+
+
+@pytest.mark.parametrize(
+ "roll_type, class_name",
+ [
+ ("rolling", "Rolling"),
+ ("expanding", "Expanding"),
+ ("ewm", "ExponentialMovingWindow"),
+ ],
+)
+@pytest.mark.parametrize(
+ "kernel, has_args, raises",
+ [
+ ("sum", True, True),
+ ("max", True, True),
+ ("min", True, True),
+ ("mean", True, True),
+ ("median", False, False),
+ ("std", True, True),
+ ("var", True, True),
+ ("skew", False, False),
+ ("sem", True, True),
+ ("kurt", False, False),
+ ("quantile", False, False),
+ ("rank", False, False),
+ ("cov", False, False),
+ ("corr", False, False),
+ ],
+)
+def test_args_kwargs_depr(roll_type, class_name, kernel, has_args, raises):
+ # GH#47836
+ r = getattr(Series([2, 4, 6]), roll_type)(2)
+ error_msg = "numpy operations are not valid with window objects"
+ if kernel == "quantile":
+ required_args = (0.5,)
+ else:
+ required_args = ()
+
+ if roll_type == "ewm" and kernel not in (
+ "sum",
+ "mean",
+ "std",
+ "var",
+ "cov",
+ "corr",
+ ):
+ # kernels not implemented for ewm
+ with pytest.raises(AttributeError, match=f"has no attribute '{kernel}'"):
+ getattr(r, kernel)
+ else:
+ warn_msg = f"Passing additional kwargs to {class_name}.{kernel}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ if raises:
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(r, kernel)(*required_args, dtype=np.float64)
+ else:
+ getattr(r, kernel)(*required_args, dtype=np.float64)
+
+ if has_args:
+ warn_msg = f"Passing additional args to {class_name}.{kernel}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ # sem raises for rolling but not expanding
+ if raises and (roll_type != "expanding" or kernel != "sem"):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(r, kernel)(*required_args, 1, 2, 3, 4)
+ else:
+ getattr(r, kernel)(*required_args, 1, 2, 3, 4)
+
+ warn_msg = f"Passing additional args and kwargs to {class_name}.{kernel}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ if raises:
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(r, kernel)(*required_args, 1, 2, 3, 4, dtype=np.float64)
+ else:
+ getattr(r, kernel)(*required_args, 1, 2, 3, 4, dtype=np.float64)
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index e0051ee6d51c6..b524eb5978fa0 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -69,12 +69,16 @@ def test_numpy_compat(method):
# see gh-12811
e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
- msg = "numpy operations are not valid with window objects"
-
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(e, method)(1, 2, 3)
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(e, method)(dtype=np.float64)
+ error_msg = "numpy operations are not valid with window objects"
+
+ warn_msg = f"Passing additional args to ExponentialMovingWindow.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(e, method)(1, 2, 3)
+ warn_msg = f"Passing additional kwargs to ExponentialMovingWindow.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(e, method)(dtype=np.float64)
def test_ewma_times_not_datetime_type():
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index e0c9294c445f2..a12997018052d 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -59,12 +59,16 @@ def test_numpy_compat(method):
# see gh-12811
e = Expanding(Series([2, 4, 6]))
- msg = "numpy operations are not valid with window objects"
-
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(e, method)(1, 2, 3)
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(e, method)(dtype=np.float64)
+ error_msg = "numpy operations are not valid with window objects"
+
+ warn_msg = f"Passing additional args to Expanding.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(e, method)(1, 2, 3)
+ warn_msg = f"Passing additional kwargs to Expanding.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 785603f6e05f0..c9ec2985488be 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -158,12 +158,16 @@ def test_numpy_compat(method):
# see gh-12811
r = Rolling(Series([2, 4, 6]), window=2)
- msg = "numpy operations are not valid with window objects"
-
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(r, method)(1, 2, 3)
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(r, method)(dtype=np.float64)
+ error_msg = "numpy operations are not valid with window objects"
+
+ warn_msg = f"Passing additional args to Rolling.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(r, method)(1, 2, 3)
+ warn_msg = f"Passing additional kwargs to Rolling.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(r, method)(dtype=np.float64)
@pytest.mark.parametrize("closed", ["right", "left", "both", "neither"])
diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py
index 8c8e9cadbfdc1..ba80ac19a6b6a 100644
--- a/pandas/tests/window/test_win_type.py
+++ b/pandas/tests/window/test_win_type.py
@@ -79,12 +79,16 @@ def test_numpy_compat(method):
# see gh-12811
w = Series([2, 4, 6]).rolling(window=2)
- msg = "numpy operations are not valid with window objects"
-
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(w, method)(1, 2, 3)
- with pytest.raises(UnsupportedFunctionCall, match=msg):
- getattr(w, method)(dtype=np.float64)
+ error_msg = "numpy operations are not valid with window objects"
+
+ warn_msg = f"Passing additional args to Rolling.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(w, method)(1, 2, 3)
+ warn_msg = f"Passing additional kwargs to Rolling.{method}"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ with pytest.raises(UnsupportedFunctionCall, match=error_msg):
+ getattr(w, method)(dtype=np.float64)
@td.skip_if_no_scipy
| - [x] closes #47836 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
We can't call `maybe_warn_args_and_kwargs` any further in the stack (e.g. in `_apply`) because args aren't passed along. | https://api.github.com/repos/pandas-dev/pandas/pulls/47851 | 2022-07-25T21:06:17Z | 2022-07-29T01:33:20Z | 2022-07-29T01:33:20Z | 2022-07-29T02:53:54Z |
TYP/CLN: small cleanups from flake-pyi | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f8cb869e6ed89..f65c42dab1852 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -230,7 +230,7 @@ repos:
language: python
additional_dependencies:
- flake8==4.0.1
- - flake8-pyi==22.5.1
+ - flake8-pyi==22.7.0
- id: future-annotations
name: import annotations from __future__
entry: 'from __future__ import annotations'
diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index f55ff0ae8b574..9ffcf25f6eacd 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import Any
import numpy as np
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index bad0f2bab93d8..b27d2b5f8fd4d 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import (
Any,
Generic,
@@ -84,7 +82,7 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], key: _OrderableTimesT
) -> bool: ...
@overload
- def __contains__(self: Interval[_OrderableScalarT], key: int | float) -> bool: ...
+ def __contains__(self: Interval[_OrderableScalarT], key: float) -> bool: ...
@overload
def __add__(
self: Interval[_OrderableTimesT], y: Timedelta
@@ -94,7 +92,7 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __add__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __add__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __radd__(
self: Interval[_OrderableTimesT], y: Timedelta
@@ -104,7 +102,7 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __radd__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __radd__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __sub__(
self: Interval[_OrderableTimesT], y: Timedelta
@@ -114,7 +112,7 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __sub__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __sub__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __rsub__(
self: Interval[_OrderableTimesT], y: Timedelta
@@ -124,31 +122,31 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __rsub__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __rsub__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __mul__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __mul__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __mul__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __rmul__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __rmul__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __rmul__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __truediv__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __truediv__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __truediv__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __floordiv__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
- def __floordiv__(self: Interval[float], y: int | float) -> Interval[float]: ...
+ def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ...
def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ...
def intervals_to_interval_bounds(
diff --git a/pandas/_libs/join.pyi b/pandas/_libs/join.pyi
index 8d02f8f57dee1..11b65b859095f 100644
--- a/pandas/_libs/join.pyi
+++ b/pandas/_libs/join.pyi
@@ -55,7 +55,7 @@ def asof_join_backward_on_X_by_Y(
left_by_values: np.ndarray, # by_t[:]
right_by_values: np.ndarray, # by_t[:]
allow_exact_matches: bool = ...,
- tolerance: np.number | int | float | None = ...,
+ tolerance: np.number | float | None = ...,
use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_forward_on_X_by_Y(
@@ -64,7 +64,7 @@ def asof_join_forward_on_X_by_Y(
left_by_values: np.ndarray, # by_t[:]
right_by_values: np.ndarray, # by_t[:]
allow_exact_matches: bool = ...,
- tolerance: np.number | int | float | None = ...,
+ tolerance: np.number | float | None = ...,
use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_nearest_on_X_by_Y(
@@ -73,6 +73,6 @@ def asof_join_nearest_on_X_by_Y(
left_by_values: np.ndarray, # by_t[:]
right_by_values: np.ndarray, # by_t[:]
allow_exact_matches: bool = ...,
- tolerance: np.number | int | float | None = ...,
+ tolerance: np.number | float | None = ...,
use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
diff --git a/pandas/_libs/json.pyi b/pandas/_libs/json.pyi
index 555484a37c83f..8e7ba60ccce24 100644
--- a/pandas/_libs/json.pyi
+++ b/pandas/_libs/json.pyi
@@ -12,7 +12,7 @@ def dumps(
date_unit: str = ...,
iso_dates: bool = ...,
default_handler: None
- | Callable[[Any], str | int | float | bool | list | dict | None] = ...,
+ | Callable[[Any], str | float | bool | list | dict | None] = ...,
) -> str: ...
def loads(
s: str,
diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi
index c885b869f983a..c3d550c7a5ba9 100644
--- a/pandas/_libs/tslibs/offsets.pyi
+++ b/pandas/_libs/tslibs/offsets.pyi
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from datetime import (
datetime,
timedelta,
diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi
index 1921329da9e24..7969c0901e08f 100644
--- a/pandas/_libs/tslibs/timedeltas.pyi
+++ b/pandas/_libs/tslibs/timedeltas.pyi
@@ -86,7 +86,7 @@ class Timedelta(timedelta):
cls: type[_S],
value=...,
unit: str = ...,
- **kwargs: int | float | np.integer | np.floating,
+ **kwargs: float | np.integer | np.floating,
) -> _S: ...
# GH 46171
# While Timedelta can return pd.NaT, having the constructor return
@@ -123,7 +123,7 @@ class Timedelta(timedelta):
@overload # type: ignore[override]
def __floordiv__(self, other: timedelta) -> int: ...
@overload
- def __floordiv__(self, other: int | float) -> Timedelta: ...
+ def __floordiv__(self, other: float) -> Timedelta: ...
@overload
def __floordiv__(
self, other: npt.NDArray[np.timedelta64]
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index 082f26cf6f213..f39d1f44d82c0 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -33,13 +33,7 @@ class Timestamp(datetime):
value: int # np.int64
def __new__(
cls: type[_DatetimeT],
- ts_input: int
- | np.integer
- | float
- | str
- | _date
- | datetime
- | np.datetime64 = ...,
+ ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
freq: int | None | str | BaseOffset = ...,
tz: str | _tzinfo | None | int = ...,
unit: str | int | None = ...,
diff --git a/pandas/_libs/writers.pyi b/pandas/_libs/writers.pyi
index 0d2096eee3573..611c0c7cd1512 100644
--- a/pandas/_libs/writers.pyi
+++ b/pandas/_libs/writers.pyi
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import numpy as np
from pandas._typing import ArrayLike
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 4bc5f75400455..6e531d10336b5 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -82,7 +82,7 @@
# scalars
-PythonScalar = Union[str, int, float, bool]
+PythonScalar = Union[str, float, bool]
DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"]
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime]
@@ -92,10 +92,10 @@
# timestamp and timedelta convertible types
TimestampConvertibleTypes = Union[
- "Timestamp", datetime, np.datetime64, int, np.int64, float, str
+ "Timestamp", datetime, np.datetime64, np.int64, float, str
]
TimedeltaConvertibleTypes = Union[
- "Timedelta", timedelta, np.timedelta64, int, np.int64, float, str
+ "Timedelta", timedelta, np.timedelta64, np.int64, float, str
]
Timezone = Union[str, tzinfo]
@@ -126,7 +126,7 @@
]
# dtypes
-NpDtype = Union[str, np.dtype, type_t[Union[str, float, int, complex, bool, object]]]
+NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]]
Dtype = Union["ExtensionDtype", NpDtype]
AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"]
# DtypeArg specifies all allowable dtypes in a functions its dtype argument
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index c3fbd716ad09d..ea9414aaaa1a8 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2170,11 +2170,11 @@ def validate_periods(periods: None) -> None:
@overload
-def validate_periods(periods: int | float) -> int:
+def validate_periods(periods: float) -> int:
...
-def validate_periods(periods: int | float | None) -> int | None:
+def validate_periods(periods: float | None) -> int | None:
"""
If a `periods` argument is passed to the Datetime/Timedelta Array/Index
constructor, cast it to an integer.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 47203fbf315e5..a541cbfe502fb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2563,7 +2563,7 @@ def to_stata(
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
- value_labels: dict[Hashable, dict[float | int, str]] | None = None,
+ value_labels: dict[Hashable, dict[float, str]] | None = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 81766dc91f271..6658b25d09e6d 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -827,7 +827,7 @@ def _get_counts_nanvar(
axis: int | None,
ddof: int,
dtype: np.dtype = np.dtype(np.float64),
-) -> tuple[int | float | np.ndarray, int | float | np.ndarray]:
+) -> tuple[float | np.ndarray, float | np.ndarray]:
"""
Get the count of non-null values along an axis, accounting
for degrees of freedom.
@@ -1414,7 +1414,7 @@ def _get_counts(
mask: npt.NDArray[np.bool_] | None,
axis: int | None,
dtype: np.dtype = np.dtype(np.float64),
-) -> int | float | np.ndarray:
+) -> float | np.ndarray:
"""
Get the count of non-null values along an axis
diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py
index fc908a5648885..da4de8cc57e65 100644
--- a/pandas/core/reshape/encoding.py
+++ b/pandas/core/reshape/encoding.py
@@ -272,7 +272,7 @@ def get_empty_frame(data) -> DataFrame:
if sparse:
- fill_value: bool | float | int
+ fill_value: bool | float
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == np.dtype(bool):
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 1ec0e6ca83d8f..7ec4bc1016a9d 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -91,7 +91,7 @@
# types used in annotations
ArrayConvertible = Union[List, Tuple, AnyArrayLike]
-Scalar = Union[int, float, str]
+Scalar = Union[float, str]
DatetimeScalar = Union[Scalar, datetime]
DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 720d02f0cf59e..5026c97c0b2b0 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -45,7 +45,7 @@
@overload
def to_timedelta(
- arg: str | int | float | timedelta,
+ arg: str | float | timedelta,
unit: UnitChoices | None = ...,
errors: DateTimeErrorChoices = ...,
) -> Timedelta:
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 3f9f6c7a5fee7..4279a9707e692 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -331,7 +331,7 @@ def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]
def build_font(
self, props: Mapping[str, str]
- ) -> dict[str, bool | int | float | str | None]:
+ ) -> dict[str, bool | float | str | None]:
font_names = self._get_font_names(props)
decoration = self._get_decoration(props)
return {
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 57bc534bd67c4..58beba4ddcb44 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1447,7 +1447,7 @@ def __init__(self, *args, **kwargs) -> None:
def _value_formatter(
self,
float_format: FloatFormatType | None = None,
- threshold: float | int | None = None,
+ threshold: float | None = None,
) -> Callable:
"""Returns a function to be applied on each value to format it"""
# the float_format parameter supersedes self.float_format
@@ -2047,7 +2047,7 @@ def __init__(
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
- def __call__(self, num: int | float) -> str:
+ def __call__(self, num: float) -> str:
"""
Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 07ec50a2cd6a8..543df8bec657b 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -325,7 +325,7 @@ def _put_str(s: str | Dtype, space: int) -> str:
return str(s)[:space].ljust(space)
-def _sizeof_fmt(num: int | float, size_qualifier: str) -> str:
+def _sizeof_fmt(num: float, size_qualifier: str) -> str:
"""
Return size in human readable format.
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index a557f9b5c0a0d..01b4812d3dc2a 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -3213,7 +3213,7 @@ def bar(
cmap: Any | None = None,
width: float = 100,
height: float = 100,
- align: str | float | int | Callable = "mid",
+ align: str | float | Callable = "mid",
vmin: float | None = None,
vmax: float | None = None,
props: str = "width: 10em;",
@@ -4039,7 +4039,7 @@ def _highlight_value(data: DataFrame | Series, op: str, props: str) -> np.ndarra
def _bar(
data: NDFrame,
- align: str | float | int | Callable,
+ align: str | float | Callable,
colors: str | list | tuple,
cmap: Any,
width: float,
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 65720e675a77a..dba161cf6d45c 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -48,7 +48,7 @@
BaseFormatter = Union[str, Callable]
ExtFormatter = Union[BaseFormatter, Dict[Any, Optional[BaseFormatter]]]
-CSSPair = Tuple[str, Union[str, int, float]]
+CSSPair = Tuple[str, Union[str, float]]
CSSList = List[CSSPair]
CSSProperties = Union[str, CSSList]
@@ -2055,7 +2055,7 @@ def _parse_latex_header_span(
return display_val
-def _parse_latex_options_strip(value: str | int | float, arg: str) -> str:
+def _parse_latex_options_strip(value: str | float, arg: str) -> str:
"""
Strip a css_value which may have latex wrapping arguments, css comment identifiers,
and whitespaces, to a valid string for latex options parsing.
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 4858d56d71c42..dc4556542d8e2 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -499,7 +499,7 @@ def validate_integer(name, val: None, min_val=...) -> None:
@overload
-def validate_integer(name, val: int | float, min_val=...) -> int:
+def validate_integer(name, val: float, min_val=...) -> int:
...
@@ -1910,7 +1910,7 @@ def _floatify_na_values(na_values):
def _stringify_na_values(na_values):
"""return a stringified and numeric for these values"""
- result: list[int | str | float] = []
+ result: list[str | float] = []
for x in na_values:
result.append(str(x))
result.append(x)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 2662df0361b55..80b6db2500d28 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -678,7 +678,7 @@ def __init__(
self.labname = catarray.name
self._encoding = encoding
categories = catarray.cat.categories
- self.value_labels: list[tuple[int | float, str]] = list(
+ self.value_labels: list[tuple[float, str]] = list(
zip(np.arange(len(categories)), categories)
)
self.value_labels.sort(key=lambda x: x[0])
@@ -699,7 +699,7 @@ def _prepare_value_labels(self):
# Compute lengths and setup lists of offsets and labels
offsets: list[int] = []
- values: list[int | float] = []
+ values: list[float] = []
for vl in self.value_labels:
category: str | bytes = vl[1]
if not isinstance(category, str):
@@ -798,7 +798,7 @@ class StataNonCatValueLabel(StataValueLabel):
def __init__(
self,
labname: str,
- value_labels: dict[float | int, str],
+ value_labels: dict[float, str],
encoding: Literal["latin-1", "utf-8"] = "latin-1",
) -> None:
@@ -807,7 +807,7 @@ def __init__(
self.labname = labname
self._encoding = encoding
- self.value_labels: list[tuple[int | float, str]] = sorted(
+ self.value_labels: list[tuple[float, str]] = sorted(
value_labels.items(), key=lambda x: x[0]
)
self._prepare_value_labels()
@@ -887,7 +887,7 @@ class StataMissingValue:
"float64": struct.unpack("<d", float64_base)[0],
}
- def __init__(self, value: int | float) -> None:
+ def __init__(self, value: float) -> None:
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
@@ -906,7 +906,7 @@ def string(self) -> str:
return self._str
@property
- def value(self) -> int | float:
+ def value(self) -> float:
"""
The binary representation of the missing value.
@@ -931,7 +931,7 @@ def __eq__(self, other: Any) -> bool:
)
@classmethod
- def get_base_missing_value(cls, dtype: np.dtype) -> int | float:
+ def get_base_missing_value(cls, dtype: np.dtype) -> float:
if dtype.type is np.int8:
value = cls.BASE_MISSING_VALUES["int8"]
elif dtype.type is np.int16:
@@ -1526,7 +1526,7 @@ def _read_value_labels(self) -> None:
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
- self.value_label_dict: dict[str, dict[float | int, str]] = {}
+ self.value_label_dict: dict[str, dict[float, str]] = {}
return
if self.format_version >= 117:
@@ -1886,7 +1886,7 @@ def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFra
def _do_convert_categoricals(
self,
data: DataFrame,
- value_label_dict: dict[str, dict[float | int, str]],
+ value_label_dict: dict[str, dict[float, str]],
lbllist: Sequence[str],
order_categoricals: bool,
) -> DataFrame:
@@ -1977,7 +1977,7 @@ def variable_labels(self) -> dict[str, str]:
"""
return dict(zip(self.varlist, self._variable_labels))
- def value_labels(self) -> dict[str, dict[float | int, str]]:
+ def value_labels(self) -> dict[str, dict[float, str]]:
"""
Return a dict, associating each variable name a dict, associating
each value its corresponding label.
@@ -2270,7 +2270,7 @@ def __init__(
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
- value_labels: dict[Hashable, dict[float | int, str]] | None = None,
+ value_labels: dict[Hashable, dict[float, str]] | None = None,
) -> None:
super().__init__()
self.data = data
@@ -3200,7 +3200,7 @@ def __init__(
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
- value_labels: dict[Hashable, dict[float | int, str]] | None = None,
+ value_labels: dict[Hashable, dict[float, str]] | None = None,
) -> None:
# Copy to new list since convert_strl might be modified later
self._convert_strl: list[Hashable] = []
@@ -3597,7 +3597,7 @@ def __init__(
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
- value_labels: dict[Hashable, dict[float | int, str]] | None = None,
+ value_labels: dict[Hashable, dict[float, str]] | None = None,
) -> None:
if version is None:
version = 118 if data.shape[1] <= 32767 else 119
| Most of these changes are:
> Y041 Use "float" instead of "int | float" (see "The numeric tower" in PEP 484)
flake8-pyi would probably find similar issues in the in-line annotations but flake8-pyi does at the moment only support pyi files. | https://api.github.com/repos/pandas-dev/pandas/pulls/47850 | 2022-07-25T20:42:55Z | 2022-07-26T22:34:01Z | 2022-07-26T22:34:01Z | 2022-09-28T20:57:30Z |
Added improvements in to_datetime Error reporting message - Outofbounds error message | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 9c7f35d240f96..b3e51191e8efa 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -652,7 +652,8 @@ cpdef array_to_datetime(
else:
raise TypeError(f"{type(val)} is not convertible to datetime")
- except OutOfBoundsDatetime:
+ except OutOfBoundsDatetime as ex:
+ ex.args = (str(ex) + f" present at position {i}", )
if is_coerce:
iresult[i] = NPY_NAT
continue
diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py
index 44d6bc57b0431..858eaacd67ec2 100644
--- a/pandas/tests/base/test_constructors.py
+++ b/pandas/tests/base/test_constructors.py
@@ -157,7 +157,7 @@ def test_constructor_datetime_outofbound(self, a, klass):
# Explicit dtype specified
# Forced conversion fails for all -> all cases raise error
- msg = "Out of bounds"
+ msg = "Out of bounds|Out of bounds .* present at position 0"
with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):
klass(a, dtype="datetime64[ns]")
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index e971e311e7d20..086cb18dbe463 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -521,7 +521,7 @@ def test_construction_outofbounds(self):
# coerces to object
tm.assert_index_equal(Index(dates), exp)
- msg = "Out of bounds nanosecond timestamp"
+ msg = "Out of bounds .* present at position 0"
with pytest.raises(OutOfBoundsDatetime, match=msg):
# can't create DatetimeIndex
DatetimeIndex(dates)
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index c60e56875bfcd..890590094094a 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -38,7 +38,7 @@ def test_dti_date(self):
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
- msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
+ msg = "Out of bounds .* present at position 0"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index afa06bf1a79af..b128838318ac0 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -688,9 +688,10 @@ def test_to_datetime_dt64s(self, cache, dt):
"dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")]
)
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
- msg = f"Out of bounds nanosecond timestamp: {dt}"
+ msg = "Out of bounds .* present at position 0"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(dt, errors="raise")
+ msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert to_datetime(dt, errors="coerce", cache=cache) is NaT
@@ -973,13 +974,13 @@ def test_datetime_outofbounds_scalar(self, value, format, infer):
assert res is NaT
if format is not None:
- msg = "is a bad directive in format|Out of bounds nanosecond timestamp"
+ msg = "is a bad directive in format|Out of bounds .* present at position 0"
with pytest.raises(ValueError, match=msg):
to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
else:
- msg = "Out of bounds nanosecond timestamp"
+ msg = "Out of bounds .* present at position 0"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
@@ -1700,7 +1701,7 @@ def test_to_datetime_barely_out_of_bounds(self):
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
- msg = "Out of bounds nanosecond timestamp"
+ msg = "Out of bounds .* present at position 0"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(arr)
@@ -2593,7 +2594,10 @@ def test_invalid_origins_tzinfo(self):
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
- msg = "Out of bounds nanosecond timestamp"
+ msg = (
+ "Out of bounds nanosecond timestamp: 2417-10-27 00:00:00 "
+ "present at position 0"
+ )
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime("2417-10-27 00:00:00", format=format)
diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py
index a0fafc227e001..64a45f6507810 100644
--- a/pandas/tests/tslibs/test_array_to_datetime.py
+++ b/pandas/tests/tslibs/test_array_to_datetime.py
@@ -125,7 +125,7 @@ def test_coerce_outside_ns_bounds(invalid_date, errors):
kwargs = {"values": arr, "errors": errors}
if errors == "raise":
- msg = "Out of bounds nanosecond timestamp"
+ msg = "Out of bounds .* present at position 0"
with pytest.raises(ValueError, match=msg):
tslib.array_to_datetime(**kwargs)
@@ -170,7 +170,9 @@ def test_to_datetime_barely_out_of_bounds():
# Close enough to bounds that dropping nanos
# would result in an in-bounds datetime.
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
- msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16"
+ msg = (
+ "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16 present at position 0"
+ )
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
tslib.array_to_datetime(arr)
| Splitting changes from #47597, This PR aims to close the bug #46509
- Changes description
- For issue #46509 - added more details in OutOfBoundsDatetime exception about field and its position causing the issue in `pandas/_libs/tslib.pyx` in `array_to_datetime`
- Updated relevant testcases
- Example of changed error messages
| Code | Old Error ( on 1.4.3 ) | New Error |
| ------------- | ------------- | ------------- |
| pd.to_datetime(pd.Series(["11:00", "12:00", "11:0"])) | pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 1-01-01 11:00:00 | pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 1-01-01 11:00:00 present at position 2 |
| pd.to_datetime(['11:0']) | pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 1-01-01 11:00:00 | pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 1-01-01 11:00:00 present at position 0 |
| pd.to_datetime('11:0') | pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 1-01-01 11:00:00 | pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 1-01-01 11:00:00 present at position 0 |
| https://api.github.com/repos/pandas-dev/pandas/pulls/47849 | 2022-07-25T19:22:07Z | 2022-08-01T18:50:15Z | 2022-08-01T18:50:15Z | 2022-08-31T18:22:07Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.