title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CI: xfail failing 32-bit tests
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 2c3d8b4608806..28e27791cad35 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Index, Series, Timestamp, concat +from pandas import DataFrame, Index, Series, Timestamp, compat, concat import pandas._testing as tm from pandas.core.base import SpecificationError @@ -277,7 +277,7 @@ def test_preserve_metadata(): @pytest.mark.parametrize( "func,window_size,expected_vals", [ - ( + pytest.param( "rolling", 2, [ @@ -289,6 +289,7 @@ def test_preserve_metadata(): [35.0, 40.0, 60.0, 40.0], [60.0, 80.0, 85.0, 80], ], + marks=pytest.mark.xfail(not compat.IS64, reason="GH-35294"), ), ( "expanding", diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py index bc38634da8941..2aaf6af103e98 100644 --- a/pandas/tests/window/test_apply.py +++ b/pandas/tests/window/test_apply.py @@ -4,7 +4,7 @@ from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td -from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range import pandas._testing as tm @@ -142,6 +142,7 @@ def test_invalid_kwargs_nopython(): @pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]]) +@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply_args_kwargs(args_kwargs): # GH 33433 def foo(x, par): diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 5b2687271f9d6..744ca264e91d9 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Series +from pandas import DataFrame, Series, compat import pandas._testing as tm from pandas.core.groupby.groupby 
import get_groupby @@ -23,6 +23,7 @@ def test_mutated(self): g = get_groupby(self.frame, by="A", mutated=True) assert g.mutated + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_getitem(self): g = self.frame.groupby("A") g_mutated = get_groupby(self.frame, by="A", mutated=True) @@ -55,6 +56,7 @@ def test_getitem_multiple(self): result = r.B.count() tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling(self): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -72,6 +74,7 @@ def test_rolling(self): @pytest.mark.parametrize( "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] ) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_quantile(self, interpolation): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -102,6 +105,7 @@ def func(x): expected = g.apply(func) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply(self, raw): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -111,6 +115,7 @@ def test_rolling_apply(self, raw): expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply_mutability(self): # GH 14013 df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) @@ -192,6 +197,7 @@ def test_expanding_apply(self, raw): tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]]) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling(self, expected_value, raw_value): # GH 31754 diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py index 8aa4d7103e48a..90f919d5565b0 100644 --- a/pandas/tests/window/test_timeseries_window.py +++ b/pandas/tests/window/test_timeseries_window.py @@ -7,6 +7,7 
@@ MultiIndex, Series, Timestamp, + compat, date_range, to_datetime, ) @@ -656,6 +657,7 @@ def agg_by_day(x): tm.assert_frame_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_monotonic(self): # GH 15130 @@ -685,6 +687,7 @@ def test_groupby_monotonic(self): result = df.groupby("name").rolling("180D", on="date")["amount"].sum() tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_non_monotonic(self): # GH 13966 (similar to #15130, closed by #15175)
https://github.com/pandas-dev/pandas/issues/35294
https://api.github.com/repos/pandas-dev/pandas/pulls/35295
2020-07-15T19:44:33Z
2020-07-15T21:09:15Z
2020-07-15T21:09:15Z
2020-07-15T21:09:19Z
ENH: Add orient=tight format for dictionaries
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index b7efec8fd2e89..989963ec84db9 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -134,6 +134,28 @@ Previously, negative arguments returned empty frames. df.groupby("A").nth(slice(1, -1)) df.groupby("A").nth([slice(None, 1), slice(-1, None)]) +.. _whatsnew_140.dict_tight: + +DataFrame.from_dict and DataFrame.to_dict have new ``'tight'`` option +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A new ``'tight'`` dictionary format that preserves :class:`MultiIndex` entries and names +is now available with the :meth:`DataFrame.from_dict` and :meth:`DataFrame.to_dict` methods +and can be used with the standard ``json`` library to produce a tight +representation of :class:`DataFrame` objects (:issue:`4889`). + +.. ipython:: python + + df = pd.DataFrame.from_records( + [[1, 3], [2, 4]], + index=pd.MultiIndex.from_tuples([("a", "b"), ("a", "c")], + names=["n1", "n2"]), + columns=pd.MultiIndex.from_tuples([("x", 1), ("y", 2)], + names=["z1", "z2"]), + ) + df + df.to_dict(orient='tight') + .. _whatsnew_140.enhancements.other: Other enhancements diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1bb3dda0312cd..f9f5c89d4bd4d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1520,15 +1520,21 @@ def from_dict( ---------- data : dict Of the form {field : array-like} or {field : dict}. - orient : {'columns', 'index'}, default 'columns' + orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. + If 'tight', assume a dict with keys ['index', 'columns', 'data', + 'index_names', 'column_names']. + + .. 
versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError - if used with ``orient='columns'``. + if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- @@ -1539,6 +1545,7 @@ def from_dict( DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. + DataFrame.to_dict : Convert the DataFrame to a dictionary. Examples -------- @@ -1569,6 +1576,21 @@ def from_dict( A B C D row_1 3 2 1 0 row_2 a b c d + + Specify ``orient='tight'`` to create the DataFrame using a 'tight' + format: + + >>> data = {'index': [('a', 'b'), ('a', 'c')], + ... 'columns': [('x', 1), ('y', 2)], + ... 'data': [[1, 3], [2, 4]], + ... 'index_names': ['n1', 'n2'], + ... 'column_names': ['z1', 'z2']} + >>> pd.DataFrame.from_dict(data, orient='tight') + z1 x y + z2 1 2 + n1 n2 + a b 1 3 + c 2 4 """ index = None orient = orient.lower() @@ -1579,13 +1601,28 @@ def from_dict( data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) - elif orient == "columns": + elif orient == "columns" or orient == "tight": if columns is not None: - raise ValueError("cannot use columns parameter with orient='columns'") + raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError("only recognize index or columns for orient") - return cls(data, index=index, columns=columns, dtype=dtype) + if orient != "tight": + return cls(data, index=index, columns=columns, dtype=dtype) + else: + realdata = data["data"] + + def create_index(indexlist, namelist): + index: Index + if len(namelist) > 1: + index = MultiIndex.from_tuples(indexlist, names=namelist) + else: + index = Index(indexlist, name=namelist[0]) + return index + + index = 
create_index(data["index"], data["index_names"]) + columns = create_index(data["columns"], data["column_names"]) + return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, @@ -1675,6 +1712,9 @@ def to_dict(self, orient: str = "dict", into=dict): - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} + - 'tight' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values], + 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} @@ -1682,6 +1722,9 @@ def to_dict(self, orient: str = "dict", into=dict): Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. + .. versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty @@ -1731,6 +1774,10 @@ def to_dict(self, orient: str = "dict", into=dict): >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} + >>> df.to_dict('tight') + {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], + 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} + You can also specify the mapping type. 
>>> from collections import OrderedDict, defaultdict @@ -1807,6 +1854,23 @@ def to_dict(self, orient: str = "dict", into=dict): ) ) + elif orient == "tight": + return into_c( + ( + ("index", self.index.tolist()), + ("columns", self.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ], + ), + ("index_names", list(self.index.names)), + ("column_names", list(self.columns.names)), + ) + ) + elif orient == "series": return into_c((k, v) for k, v in self.items()) diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index c33f649206f54..31ea3e582eeb2 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -10,6 +10,8 @@ from pandas import ( DataFrame, + Index, + MultiIndex, Series, Timestamp, ) @@ -312,3 +314,33 @@ def test_to_dict_mixed_numeric_frame(self): result = df.reset_index().to_dict("records") expected = [{"index": 0, "a": 1.0, "b": 9.0}] assert result == expected + + @pytest.mark.parametrize( + "index", + [ + None, + Index(["aa", "bb"]), + Index(["aa", "bb"], name="cc"), + MultiIndex.from_tuples([("a", "b"), ("a", "c")]), + MultiIndex.from_tuples([("a", "b"), ("a", "c")], names=["n1", "n2"]), + ], + ) + @pytest.mark.parametrize( + "columns", + [ + ["x", "y"], + Index(["x", "y"]), + Index(["x", "y"], name="z"), + MultiIndex.from_tuples([("x", 1), ("y", 2)]), + MultiIndex.from_tuples([("x", 1), ("y", 2)], names=["z1", "z2"]), + ], + ) + def test_to_dict_orient_tight(self, index, columns): + df = DataFrame.from_records( + [[1, 3], [2, 4]], + columns=columns, + index=index, + ) + roundtrip = DataFrame.from_dict(df.to_dict(orient="tight"), orient="tight") + + tm.assert_frame_equal(df, roundtrip)
- [x] xref #4889 (see below) - [x] tests added / passed - tests.frame.methods.test_to_dict.test_to_dict_orient_tight() - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The issue in #4889 contains examples of how the JSON format doesn't support `MultiIndex` for the index or the columns, independent of the `orient` chosen. Also, none of the orientations support index names (or `MultiIndex` names). Finally, if you want to have a JSON format that is "tight" (or "compact"), the `split` is the closest thing, but it is incomplete due to the indexing issues. As a first step to addressing this, I have created a `orient='tight'` option to `DataFrame.to_dict()` and `DataFrame.from_dict()` . If we agree that this is a reasonable representation, the next step (which I'd prefer to do in a second PR) would be to also support it with JSON. The challenge there is that `to_json()` is currently implemented in C. Right now, one can just use `json.dumps(df.to_dict(orient='tight'))` to get the needed JSON as a workaround. I'm open to changing the word `'tight'` to something else, but I wanted it to start with a different letter than the other orientations.
https://api.github.com/repos/pandas-dev/pandas/pulls/35292
2020-07-15T17:36:26Z
2021-10-16T15:50:53Z
2021-10-16T15:50:53Z
2023-02-13T20:55:24Z
xfail failing 32-bit tests
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index f7bb73b916ce0..b5a1dc2b2fb94 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -18,6 +18,7 @@ PY38 = sys.version_info >= (3, 8) PY39 = sys.version_info >= (3, 9) PYPY = platform.python_implementation() == "PyPy" +IS64 = sys.maxsize > 2 ** 32 # ---------------------------------------------------------------------------- diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 97b53a6e66575..c4db0170ecc90 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -13,7 +13,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json +from pandas import DataFrame, DatetimeIndex, Series, Timestamp, compat, read_json import pandas._testing as tm _seriesd = tm.getSeriesData() @@ -1257,7 +1257,7 @@ def test_to_json_large_numbers(self, bigNum): assert json == expected @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) - @pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="GH-35279") + @pytest.mark.skipif(not compat.IS64, reason="GH-35279") def test_read_json_large_numbers(self, bigNum): # GH20599 diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 952c583040360..f969cbca9f427 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -561,6 +561,7 @@ def test_encode_long_conversion(self): assert long_input == ujson.decode(output) @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + @pytest.mark.xfail(not compat.IS64, reason="GH-35288") def test_dumps_ints_larger_than_maxsize(self, bigNum): # GH34395 bigNum = sys.maxsize + 1 diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index cbf3a778f9ae0..b36b11582c1ec 100644 --- 
a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -6,7 +6,7 @@ from pandas.util._test_decorators import async_mark import pandas as pd -from pandas import DataFrame, Series, Timestamp +from pandas import DataFrame, Series, Timestamp, compat import pandas._testing as tm from pandas.core.indexes.datetimes import date_range @@ -317,6 +317,7 @@ def test_resample_groupby_with_label(): tm.assert_frame_equal(result, expected) +@pytest.mark.xfail(not compat.IS64, reason="GH-35148") def test_consistency_with_window(): # consistent return values with window
We need MacPython to be passing for the wheels to be built.
https://api.github.com/repos/pandas-dev/pandas/pulls/35289
2020-07-15T15:34:14Z
2020-07-15T18:04:48Z
2020-07-15T18:04:48Z
2020-07-15T20:27:02Z
Fix indexing, reindex on all-sparse SparseArray.
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 814dbe999d5c1..1864367db0c0b 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1123,7 +1123,7 @@ Sparse - Creating a :class:`SparseArray` from timezone-aware dtype will issue a warning before dropping timezone information, instead of doing so silently (:issue:`32501`) - Bug in :meth:`arrays.SparseArray.from_spmatrix` wrongly read scipy sparse matrix (:issue:`31991`) - Bug in :meth:`Series.sum` with ``SparseArray`` raises ``TypeError`` (:issue:`25777`) -- Bug where :class:`DataFrame` containing :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`) +- Bug where :class:`DataFrame` containing an all-sparse :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`) - The repr of :class:`SparseDtype` now includes the repr of its ``fill_value`` attribute. Previously it used ``fill_value``'s string representation (:issue:`34352`) - Bug where empty :class:`DataFrame` could not be cast to :class:`SparseDtype` (:issue:`33113`) - Bug in :meth:`arrays.SparseArray` was returning the incorrect type when indexing a sparse dataframe with an iterable (:issue:`34526`, :issue:`34540`) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index b18a58da3950f..1d675b54a9c62 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -862,21 +862,26 @@ def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: else: raise IndexError("cannot do a non-empty take from an empty axes.") + # sp_indexer may be -1 for two reasons + # 1.) we took for an index of -1 (new) + # 2.) 
we took a value that was self.fill_value (old) sp_indexer = self.sp_index.lookup_array(indices) + new_fill_indices = indices == -1 + old_fill_indices = (sp_indexer == -1) & ~new_fill_indices - if self.sp_index.npoints == 0: + if self.sp_index.npoints == 0 and old_fill_indices.all(): + # We've looked up all valid points on an all-sparse array. + taken = np.full( + sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype + ) + + elif self.sp_index.npoints == 0: # Avoid taking from the empty self.sp_values _dtype = np.result_type(self.dtype.subtype, type(fill_value)) taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) else: taken = self.sp_values.take(sp_indexer) - # sp_indexer may be -1 for two reasons - # 1.) we took for an index of -1 (new) - # 2.) we took a value that was self.fill_value (old) - new_fill_indices = indices == -1 - old_fill_indices = (sp_indexer == -1) & ~new_fill_indices - # Fill in two steps. # Old fill values # New fill values diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6a4b3318d3aa7..cc0f09ced7399 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1636,10 +1636,7 @@ def _holder(self): @property def fill_value(self): # Used in reindex_indexer - if is_sparse(self.values): - return self.values.dtype.fill_value - else: - return self.values.dtype.na_value + return self.values.dtype.na_value @property def _can_hold_na(self): diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index d0cdec712f39d..04215bfe1bedb 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -281,6 +281,11 @@ def test_take(self): exp = SparseArray(np.take(self.arr_data, [0, 1, 2])) tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp) + def test_take_all_empty(self): + a = pd.array([0, 0], dtype=pd.SparseDtype("int64")) + result = a.take([0, 1], allow_fill=True, 
fill_value=np.nan) + tm.assert_sp_array_equal(a, result) + def test_take_fill_value(self): data = np.array([1, np.nan, 0, 3, 0]) sparse = SparseArray(data, fill_value=0) diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index 5d0ea69007e27..251376798efc3 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -399,31 +399,3 @@ def test_item(self, data): with pytest.raises(ValueError, match=msg): s.item() - - def test_boolean_mask_frame_fill_value(self, data): - # https://github.com/pandas-dev/pandas/issues/27781 - df = pd.DataFrame({"A": data}) - - mask = np.random.choice([True, False], df.shape[0]) - result = pd.isna(df.iloc[mask]["A"]) - expected = pd.isna(df["A"].iloc[mask]) - self.assert_series_equal(result, expected) - - mask = pd.Series(mask, index=df.index) - result = pd.isna(df.loc[mask]["A"]) - expected = pd.isna(df["A"].loc[mask]) - self.assert_series_equal(result, expected) - - def test_fancy_index_frame_fill_value(self, data): - # https://github.com/pandas-dev/pandas/issues/29563 - df = pd.DataFrame({"A": data}) - - mask = np.random.choice(df.shape[0], df.shape[0]) - result = pd.isna(df.iloc[mask]["A"]) - expected = pd.isna(df["A"].iloc[mask]) - self.assert_series_equal(result, expected) - - mask = pd.Series(mask, index=df.index) - result = pd.isna(df.loc[mask]["A"]) - expected = pd.isna(df["A"].loc[mask]) - self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 68e521b005c02..b411ca1c482a4 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -41,11 +41,6 @@ def data_for_twos(request): return SparseArray(np.ones(100) * 2) -@pytest.fixture(params=[0, np.nan]) -def data_zeros(request): - return SparseArray(np.zeros(100, dtype=int), fill_value=request.param) - - @pytest.fixture(params=[0, np.nan]) def data_missing(request): """Length 2 array 
with [NA, Valid]""" diff --git a/pandas/tests/frame/indexing/test_sparse.py b/pandas/tests/frame/indexing/test_sparse.py index 876fbe212c466..04e1c8b94c4d9 100644 --- a/pandas/tests/frame/indexing/test_sparse.py +++ b/pandas/tests/frame/indexing/test_sparse.py @@ -49,3 +49,23 @@ def test_locindexer_from_spmatrix(self, spmatrix_t, dtype): result = df.loc[itr_idx].dtypes.values expected = np.full(cols, SparseDtype(dtype, fill_value=0)) tm.assert_numpy_array_equal(result, expected) + + def test_reindex(self): + # https://github.com/pandas-dev/pandas/issues/35286 + df = pd.DataFrame( + {"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))} + ) + result = df.reindex([0, 2]) + expected = pd.DataFrame( + { + "A": [0.0, np.nan], + "B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)), + }, + index=[0, 2], + ) + tm.assert_frame_equal(result, expected) + + def test_all_sparse(self): + df = pd.DataFrame({"A": pd.array([0, 0], dtype=pd.SparseDtype("int64"))}) + result = df.loc[[0, 1]] + tm.assert_frame_equal(result, df)
Closes https://github.com/pandas-dev/pandas/issues/35286. Also added a regression tests for the issue reported there.
https://api.github.com/repos/pandas-dev/pandas/pulls/35287
2020-07-15T14:11:23Z
2020-07-16T11:17:07Z
2020-07-16T11:17:06Z
2020-07-16T11:17:10Z
To latex position
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index eb55369d83593..4fbc1d4b6965e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2840,6 +2840,7 @@ def to_latex( multirow=None, caption=None, label=None, + position=None, ): r""" Render object to a LaTeX tabular, longtable, or nested table/tabular. @@ -2925,6 +2926,9 @@ def to_latex( This is used with ``\ref{}`` in the main ``.tex`` file. .. versionadded:: 1.0.0 + position : str, optional + The LaTeX positional argument for tables, to be placed after + ``\begin{}`` in the output. %(returns)s See Also -------- @@ -2986,6 +2990,7 @@ def to_latex( multirow=multirow, caption=caption, label=label, + position=position, ) def to_csv( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index fe85eab4bfbf5..bdb61974b15b1 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -931,6 +931,7 @@ def to_latex( multirow: bool = False, caption: Optional[str] = None, label: Optional[str] = None, + position: Optional[str] = None, ) -> Optional[str]: """ Render a DataFrame to a LaTeX tabular/longtable environment output. 
@@ -946,6 +947,7 @@ def to_latex( multirow=multirow, caption=caption, label=label, + position=position, ).get_result(buf=buf, encoding=encoding) def _format_col(self, i: int) -> List[str]: diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 3a3ca84642d51..5d6f0a08ef2b5 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -38,6 +38,7 @@ def __init__( multirow: bool = False, caption: Optional[str] = None, label: Optional[str] = None, + position: Optional[str] = None, ): self.fmt = formatter self.frame = self.fmt.frame @@ -50,6 +51,8 @@ def __init__( self.caption = caption self.label = label self.escape = self.fmt.escape + self.position = position + self._table_float = any(p is not None for p in (caption, label, position)) def write_result(self, buf: IO[str]) -> None: """ @@ -284,7 +287,7 @@ def _write_tabular_begin(self, buf, column_format: str): <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns """ - if self.caption is not None or self.label is not None: + if self._table_float: # then write output in a nested table/tabular environment if self.caption is None: caption_ = "" @@ -296,7 +299,12 @@ def _write_tabular_begin(self, buf, column_format: str): else: label_ = f"\n\\label{{{self.label}}}" - buf.write(f"\\begin{{table}}\n\\centering{caption_}{label_}\n") + if self.position is None: + position_ = "" + else: + position_ = f"[{self.position}]" + + buf.write(f"\\begin{{table}}{position_}\n\\centering{caption_}{label_}\n") else: # then write output only in a tabular environment pass @@ -317,7 +325,7 @@ def _write_tabular_end(self, buf): """ buf.write("\\bottomrule\n") buf.write("\\end{tabular}\n") - if self.caption is not None or self.label is not None: + if self._table_float: buf.write("\\end{table}\n") else: pass @@ -337,25 +345,29 @@ def _write_longtable_begin(self, buf, column_format: str): <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns """ - 
buf.write(f"\\begin{{longtable}}{{{column_format}}}\n") + if self.caption is None: + caption_ = "" + else: + caption_ = f"\\caption{{{self.caption}}}" - if self.caption is not None or self.label is not None: - if self.caption is None: - pass - else: - buf.write(f"\\caption{{{self.caption}}}") + if self.label is None: + label_ = "" + else: + label_ = f"\\label{{{self.label}}}" - if self.label is None: - pass - else: - buf.write(f"\\label{{{self.label}}}") + if self.position is None: + position_ = "" + else: + position_ = f"[{self.position}]" + buf.write( + f"\\begin{{longtable}}{position_}{{{column_format}}}\n{caption_}{label_}" + ) + if self.caption is not None or self.label is not None: # a double-backslash is required at the end of the line # as discussed here: # https://tex.stackexchange.com/questions/219138 buf.write("\\\\\n") - else: - pass @staticmethod def _write_longtable_end(buf): diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 509e5bcb33304..93ad3739e59c7 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -573,6 +573,54 @@ def test_to_latex_longtable_caption_label(self): """ assert result_cl == expected_cl + def test_to_latex_position(self): + the_position = "h" + + df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + + # test when only the position is provided + result_p = df.to_latex(position=the_position) + + expected_p = r"""\begin{table}[h] +\centering +\begin{tabular}{lrl} +\toprule +{} & a & b \\ +\midrule +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\bottomrule +\end{tabular} +\end{table} +""" + assert result_p == expected_p + + def test_to_latex_longtable_position(self): + the_position = "t" + + df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + + # test when only the position is provided + result_p = df.to_latex(longtable=True, position=the_position) + + expected_p = r"""\begin{longtable}[t]{lrl} +\toprule +{} & a & b \\ +\midrule +\endhead +\midrule 
+\multicolumn{3}{r}{{Continued on next page}} \\ +\midrule +\endfoot + +\bottomrule +\endlastfoot +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\end{longtable} +""" + assert result_p == expected_p + def test_to_latex_escape_special_chars(self): special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"] df = DataFrame(data=special_characters)
- [ ] closes #35281 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35284
2020-07-15T11:57:25Z
2020-08-07T15:32:27Z
2020-08-07T15:32:27Z
2020-08-10T06:20:08Z
BUG: GroupBy.count() and GroupBy.sum() incorreclty return NaN instead of 0 for missing categories
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 10dfd8406b8ce..260b92b5989c1 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -152,6 +152,7 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ +- Bug in :meth:`DataFrameGroupBy.count` and :meth:`SeriesGroupBy.sum` returning ``NaN`` for missing categories when grouped on multiple ``Categoricals``. Now returning ``0`` (:issue:`35028`) - Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`) - - @@ -160,7 +161,7 @@ Groupby/resample/rolling Reshaping ^^^^^^^^^ -- +- Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`) - Sparse diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c50b753cf3293..740463f0cf356 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1829,7 +1829,13 @@ def count(self): ) blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)] - return self._wrap_agged_blocks(blocks, items=data.items) + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _wrap_agged_blocks() returns. 
GH 35028 + with com.temp_setattr(self, "observed", True): + result = self._wrap_agged_blocks(blocks, items=data.items) + + return self._reindex_output(result, fill_value=0) def nunique(self, dropna: bool = True): """ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 6c8a780859939..ed512710295d7 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1536,9 +1536,19 @@ def size(self) -> FrameOrSeriesUnion: @doc(_groupby_agg_method_template, fname="sum", no=True, mc=0) def sum(self, numeric_only: bool = True, min_count: int = 0): - return self._agg_general( - numeric_only=numeric_only, min_count=min_count, alias="add", npfunc=np.sum - ) + + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _agg_general() returns. GH #31422 + with com.temp_setattr(self, "observed", True): + result = self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="add", + npfunc=np.sum, + ) + + return self._reindex_output(result, fill_value=0) @doc(_groupby_agg_method_template, fname="prod", no=True, mc=0) def prod(self, numeric_only: bool = True, min_count: int = 0): diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 0d447a70b540d..c74c1529eb537 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -19,7 +19,7 @@ import pandas._testing as tm -def cartesian_product_for_groupers(result, args, names): +def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN): """ Reindex to a cartesian production for the groupers, preserving the nature (Categorical) of each grouper """ @@ -33,7 +33,7 @@ def f(a): return a index = MultiIndex.from_product(map(f, args), names=names) - return result.reindex(index).sort_index() + return result.reindex(index, fill_value=fill_value).sort_index() 
_results_for_groupbys_with_missing_categories = dict( @@ -309,7 +309,7 @@ def test_observed(observed): result = gb.sum() if not observed: expected = cartesian_product_for_groupers( - expected, [cat1, cat2, ["foo", "bar"]], list("ABC") + expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0 ) tm.assert_frame_equal(result, expected) @@ -319,7 +319,9 @@ def test_observed(observed): expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index) result = gb.sum() if not observed: - expected = cartesian_product_for_groupers(expected, [cat1, cat2], list("AB")) + expected = cartesian_product_for_groupers( + expected, [cat1, cat2], list("AB"), fill_value=0 + ) tm.assert_frame_equal(result, expected) @@ -1189,6 +1191,8 @@ def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation): ).sortlevel() expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C") + if operation == "agg": + expected = expected.fillna(0, downcast="infer") grouped = df_cat.groupby(["A", "B"], observed=observed)["C"] result = getattr(grouped, operation)(sum) tm.assert_series_equal(result, expected) @@ -1338,15 +1342,6 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( ) request.node.add_marker(mark) - if reduction_func == "sum": # GH 31422 - mark = pytest.mark.xfail( - reason=( - "sum should return 0 but currently returns NaN. " - "This is a known bug. See GH 31422." - ) - ) - request.node.add_marker(mark) - df = pd.DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), @@ -1367,8 +1362,11 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( val = result.loc[idx] assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan) - # If we expect unobserved values to be zero, we also expect the dtype to be int - if zero_or_nan == 0: + # If we expect unobserved values to be zero, we also expect the dtype to be int. + # Except for .sum(). If the observed categories sum to dtype=float (i.e. 
their + # sums have decimals), then the zeros for the missing categories should also be + # floats. + if zero_or_nan == 0 and reduction_func != "sum": assert np.issubdtype(result.dtype, np.integer) @@ -1410,24 +1408,6 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( if reduction_func == "ngroup": pytest.skip("ngroup does not return the Categories on the index") - if reduction_func == "count": # GH 35028 - mark = pytest.mark.xfail( - reason=( - "DataFrameGroupBy.count returns np.NaN for missing " - "categories, when it should return 0. See GH 35028" - ) - ) - request.node.add_marker(mark) - - if reduction_func == "sum": # GH 31422 - mark = pytest.mark.xfail( - reason=( - "sum should return 0 but currently returns NaN. " - "This is a known bug. See GH 31422." - ) - ) - request.node.add_marker(mark) - df = pd.DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index c07a5673fe503..67b3151b0ff9c 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1817,7 +1817,7 @@ def test_categorical_aggfunc(self, observed): ["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1" ) expected_columns = pd.Index(["a", "b"], name="C2") - expected_data = np.array([[1.0, np.nan], [1.0, np.nan], [np.nan, 2.0]]) + expected_data = np.array([[1, 0], [1, 0], [0, 2]], dtype=np.int64) expected = pd.DataFrame( expected_data, index=expected_index, columns=expected_columns ) @@ -1851,18 +1851,19 @@ def test_categorical_pivot_index_ordering(self, observed): values="Sales", index="Month", columns="Year", - dropna=observed, + observed=observed, aggfunc="sum", ) expected_columns = pd.Int64Index([2013, 2014], name="Year") expected_index = pd.CategoricalIndex( - ["January"], categories=months, ordered=False, name="Month" + months, categories=months, ordered=False, name="Month" ) + expected_data = [[320, 120]] + [[0, 0]] * 
11 expected = pd.DataFrame( - [[320, 120]], index=expected_index, columns=expected_columns + expected_data, index=expected_index, columns=expected_columns ) - if not observed: - result = result.dropna().astype(np.int64) + if observed: + expected = expected.loc[["January"]] tm.assert_frame_equal(result, expected)
- [x] closes #31422 - [x] closes #35028 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry *Behavioural Changes* Fixing two related bugs: when grouping on multiple categoricals, `.sum()` and `.count()` would return `NaN` for the missing categories, but they are expected to return `0` for the missing categories. Both these bugs are fixed. *Tests* Tests were added in PR #35022 when these bugs were discovered and the tests were marked with an `xfail`. For this PR the `xfails` are removed and the tests are passing normally. As well, a few other existing tests were expecting `sum()` to return `NaN`; these have been updated so that the tests now expect to get `0` (which is the desired behaviour). *Pivot* The change to `.sum()` also impacts the `df.pivot_table()` if it is called with `aggfunc=sum` and is pivoted on a Categorical column with `observed=False`. This is not explicitly mentioned in either of the bugs, but it does make the behaviour consistent (i.e. the sum of a missing category is zero, not `NaN`). One test on test_pivot.py was updated to reflect this change.
https://api.github.com/repos/pandas-dev/pandas/pulls/35280
2020-07-15T01:40:16Z
2020-08-07T15:21:13Z
2020-08-07T15:21:12Z
2020-08-07T15:21:19Z
Revert "BUG: fix union_indexes not supporting sort=False for Index subclasses"
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cfac916157649..97474af055c3c 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1113,7 +1113,6 @@ Reshaping - Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`) - Bug in :meth:`Series.where` with an empty Series and empty ``cond`` having non-bool dtype (:issue:`34592`) - Fixed regression where :meth:`DataFrame.apply` would raise ``ValueError`` for elements whth ``S`` dtype (:issue:`34529`) -- Bug in :meth:`DataFrame.append` leading to sorting columns even when ``sort=False`` is specified (:issue:`35092`) Sparse ^^^^^^ diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 9849742abcfca..4c5a70f4088ee 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -214,13 +214,7 @@ def conv(i): return result.union_many(indexes[1:]) else: for other in indexes[1:]: - # GH 35092. Index.union expects sort=None instead of sort=True - # to signify that sort=True isn't fully implemented and - # legacy implementation sometimes might not sort (see GH 24959) - # In this case we currently sort in _get_combined_index - if sort: - sort = None - result = result.union(other, sort=sort) + result = result.union(other) return result elif kind == "array": index = indexes[0] diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 17ac2307b9da6..1631342c359c1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2578,13 +2578,11 @@ def test_construct_with_two_categoricalindex_series(self): index=pd.CategoricalIndex(["f", "female", "m", "male", "unknown"]), ) result = DataFrame([s1, s2]) - # GH 35092. 
Extra s2 columns are now appended to s1 columns - # in original order expected = DataFrame( np.array( - [[39.0, 6.0, 4.0, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]] + [[np.nan, 39.0, np.nan, 6.0, 4.0], [2.0, 152.0, 2.0, 242.0, 150.0]] ), - columns=["female", "male", "unknown", "f", "m"], + columns=["f", "female", "m", "male", "unknown"], ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index c85696e02ad39..02a173eb4958d 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -13,9 +13,8 @@ from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd -from pandas import CategoricalIndex, Index, MultiIndex, RangeIndex +from pandas import CategoricalIndex, MultiIndex, RangeIndex import pandas._testing as tm -from pandas.core.indexes.api import union_indexes class TestCommon: @@ -396,18 +395,3 @@ def test_astype_preserves_name(self, index, dtype, copy): assert result.names == index.names else: assert result.name == index.name - - -@pytest.mark.parametrize("arr", [[0, 1, 4, 3]]) -@pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"]) -def test_union_index_no_sort(arr, sort, dtype): - # GH 35092. 
Check that we don't sort with sort=False - ind1 = Index(arr[:2], dtype=dtype) - ind2 = Index(arr[2:], dtype=dtype) - - # sort is None indicates that we sort the combined index - if sort is None: - arr.sort() - expected = Index(arr, dtype=dtype) - result = union_indexes([ind1, ind2], sort=sort) - tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index ff95d8ad997a4..ffeb5ff0f8aaa 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2857,17 +2857,3 @@ def test_concat_frame_axis0_extension_dtypes(): result = pd.concat([df2, df1], ignore_index=True) expected = pd.DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("sort", [True, False]) -def test_append_sort(sort): - # GH 35092. Check that DataFrame.append respects the sort argument. - df1 = pd.DataFrame(data={0: [1, 2], 1: [3, 4]}) - df2 = pd.DataFrame(data={3: [1, 2], 2: [3, 4]}) - cols = list(df1.columns) + list(df2.columns) - if sort: - cols.sort() - - result = df1.append(df2, sort=sort).columns - expected = type(result)(cols) - tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 241721432bbf9..2b75a1ec6ca6e 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -732,11 +732,11 @@ def test_unbalanced(self): ) df["id"] = df.index exp_data = { - "X": ["X1", "X2", "X1", "X2"], - "A": [1.0, 2.0, 3.0, 4.0], - "B": [5.0, 6.0, np.nan, np.nan], - "id": [0, 1, 0, 1], - "year": [2010, 2010, 2011, 2011], + "X": ["X1", "X1", "X2", "X2"], + "A": [1.0, 3.0, 2.0, 4.0], + "B": [5.0, np.nan, 6.0, np.nan], + "id": [0, 0, 1, 1], + "year": [2010, 2011, 2010, 2011], } expected = pd.DataFrame(exp_data) expected = expected.set_index(["id", "year"])[["X", "A", "B"]] @@ -979,10 +979,10 @@ def test_nonnumeric_suffix(self): ) expected = 
pd.DataFrame( { - "A": ["X1", "X2", "X1", "X2"], - "colname": ["placebo", "placebo", "test", "test"], - "result": [5.0, 6.0, np.nan, np.nan], - "treatment": [1.0, 2.0, 3.0, 4.0], + "A": ["X1", "X1", "X2", "X2"], + "colname": ["placebo", "test", "placebo", "test"], + "result": [5.0, np.nan, 6.0, np.nan], + "treatment": [1.0, 3.0, 2.0, 4.0], } ) expected = expected.set_index(["A", "colname"]) @@ -1026,10 +1026,10 @@ def test_float_suffix(self): ) expected = pd.DataFrame( { - "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"], - "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1], - "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan], - "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], + "A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"], + "colname": [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1], + "result": [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan], + "treatment": [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0], } ) expected = expected.set_index(["A", "colname"]) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 3a4e54052305e..d9396d70f9112 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -636,15 +636,8 @@ def test_str_cat_align_mixed_inputs(self, join): # mixed list of indexed/unindexed u = np.array(["A", "B", "C", "D"]) expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"]) - # joint index of rhs [t, u]; u will be forced have index of s - # GH 35092. If right join, maintain order of t.index - if join == "inner": - rhs_idx = t.index & s.index - elif join == "right": - rhs_idx = t.index.union(s.index, sort=False) - else: - rhs_idx = t.index | s.index + rhs_idx = t.index & s.index if join == "inner" else t.index | s.index expected = expected_outer.loc[s.index.join(rhs_idx, how=join)] result = s.str.cat([t, u], join=join, na_rep="-")
Reverts pandas-dev/pandas#35098 Closes https://github.com/pandas-dev/pandas/issues/35238 I'll also push a test from https://github.com/pandas-dev/pandas/issues/35238 here, and add the xfailing tests from #35098.
https://api.github.com/repos/pandas-dev/pandas/pulls/35277
2020-07-14T20:23:45Z
2020-07-15T12:24:40Z
2020-07-15T12:24:40Z
2020-07-15T12:31:17Z
CI: pin pytest in minimum versions
diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml index 9f66f82720b5b..f5af7bcf36189 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-36-minimum_versions.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython=0.29.16 - - pytest>=5.0.1, <6.0.0rc0 + - pytest=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines
xref https://github.com/pandas-dev/pandas/pull/35260#discussion_r454293041
https://api.github.com/repos/pandas-dev/pandas/pulls/35274
2020-07-14T13:09:29Z
2020-07-14T13:56:50Z
2020-07-14T13:56:50Z
2020-07-14T13:56:57Z
Move API changes to appropriate sections
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 85b29a58a1f15..646a9c7b8c05d 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -276,6 +276,10 @@ change, as ``fsspec`` will still bring in the same packages as before. Other enhancements ^^^^^^^^^^^^^^^^^^ +- Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). +- Added :meth:`DataFrame.value_counts` (:issue:`5377`) +- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. +- Added a :func:`pandas.api.indexers.VariableOffsetWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`) - :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`) - :meth:`Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`) - When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`) @@ -336,10 +340,12 @@ Other enhancements .. --------------------------------------------------------------------------- -.. _whatsnew_110.api: +.. _whatsnew_110.notable_bug_fixes: + +Notable bug fixes +~~~~~~~~~~~~~~~~~ -Backwards incompatible API changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +These are bug fixes that might have notable behavior changes. ``MultiIndex.get_indexer`` interprets `method` argument differently ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -403,7 +409,7 @@ And the differences in reindexing ``df`` with ``mi_2`` and using ``method='pad'` - -.. _whatsnew_110.api_breaking.indexing_raises_key_errors: +.. _whatsnew_110.notable_bug_fixes.indexing_raises_key_errors: Failed Label-Based Lookups Always Raise KeyError ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -473,7 +479,10 @@ key and type of :class:`Index`. These now consistently raise ``KeyError`` (:iss ... 
KeyError: Timestamp('1970-01-01 00:00:00') -.. _whatsnew_110.api_breaking.indexing_int_multiindex_raises_key_errors: + +Similarly, :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`) + +.. _whatsnew_110.notable_bug_fixes.indexing_int_multiindex_raises_key_errors: Failed Integer Lookups on MultiIndex Raise KeyError ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -531,7 +540,7 @@ those integer keys is not present in the first level of the index (:issue:`33539 .. --------------------------------------------------------------------------- -.. _whatsnew_110.api_breaking.assignment_to_multiple_columns: +.. _whatsnew_110.notable_bug_fixes.assignment_to_multiple_columns: Assignment to multiple columns of a DataFrame when some columns do not exist ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -562,7 +571,7 @@ Assignment to multiple columns of a :class:`DataFrame` when some of the columns df[['a', 'c']] = 1 df -.. _whatsnew_110.api_breaking.groupby_consistency: +.. _whatsnew_110.notable_bug_fixes.groupby_consistency: Consistency across groupby reductions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -628,7 +637,7 @@ The method :meth:`core.DataFrameGroupBy.size` would previously ignore ``as_index df.groupby("a", as_index=False).size() -.. _whatsnew_110.api_breaking.apply_applymap_first_once: +.. 
_whatsnew_110.notable_bug_fixes.apply_applymap_first_once: apply and applymap on ``DataFrame`` evaluates first row/column only once ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -673,34 +682,6 @@ Other API changes - :meth:`Series.describe` will now show distribution percentiles for ``datetime`` dtypes, statistics ``first`` and ``last`` will now be ``min`` and ``max`` to match with numeric dtypes in :meth:`DataFrame.describe` (:issue:`30164`) -- Added :meth:`DataFrame.value_counts` (:issue:`5377`) -- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`) -- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`) -- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`) -- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. -- Added a :func:`pandas.api.indexers.VariableOffsetWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`) -- Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). -- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`. - Previously an ``AttributeError`` was raised (:issue:`31126`) -- :meth:`DataFrame.xs` now raises a ``TypeError`` if a ``level`` keyword is supplied and the axis is not a :class:`MultiIndex`. 
- Previously an ``AttributeError`` was raised (:issue:`33610`) -- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`) - now raise a ``TypeError`` if a not-accepted keyword argument is passed into it. - Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`) -- :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`) -- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` instead of incorrectly using ``int64`` (:issue:`32255`) -- Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`) -- Combining a ``Categorical`` with integer categories and which contains missing values - with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append` - will now result in a float column instead of an object dtyped column (:issue:`33607`) -- :meth:`Series.to_timestamp` now raises a ``TypeError`` if the axis is not a :class:`PeriodIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) -- :meth:`Series.to_period` now raises a ``TypeError`` if the axis is not a :class:`DatetimeIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) -- :func: `pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. -- :func:`read_excel` no longer takes ``**kwds`` arguments. 
This means that passing in keyword ``chunksize`` now raises a ``TypeError`` - (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) -- :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) -- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) -- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). Increased minimum versions for dependencies @@ -868,6 +849,8 @@ Bug fixes Categorical ^^^^^^^^^^^ +- Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`) +- Combining a ``Categorical`` with integer categories and which contains missing values with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append` will now result in a float column instead of an object dtyped column (:issue:`33607`) - Bug where :func:`merge` was unable to join on non-unique categorical indices (:issue:`28189`) - Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`) - Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`) @@ -877,6 +860,10 @@ Categorical Datetimelike ^^^^^^^^^^^^ +- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` 
instead of incorrectly using ``int64`` (:issue:`32255`) +- :meth:`Series.to_timestamp` now raises a ``TypeError`` if the axis is not a :class:`PeriodIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) +- :meth:`Series.to_period` now raises a ``TypeError`` if the axis is not a :class:`DatetimeIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) +- :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) - Bug in :class:`Timestamp` where constructing :class:`Timestamp` from ambiguous epoch time and calling constructor again changed :meth:`Timestamp.value` property (:issue:`24329`) - :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`) - Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`) @@ -944,7 +931,7 @@ Strings - Bug in the :meth:`~Series.astype` method when converting "string" dtype data to nullable integer dtype (:issue:`32450`). - Fixed issue where taking ``min`` or ``max`` of a ``StringArray`` or ``Series`` with ``StringDtype`` type would raise. (:issue:`31746`) - Bug in :meth:`Series.str.cat` returning ``NaN`` output when other had :class:`Index` type (:issue:`33425`) - +- :func: `pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. Interval ^^^^^^^^ @@ -953,6 +940,8 @@ Interval Indexing ^^^^^^^^ + +- :meth:`DataFrame.xs` now raises a ``TypeError`` if a ``level`` keyword is supplied and the axis is not a :class:`MultiIndex`. 
Previously an ``AttributeError`` was raised (:issue:`33610`) - Bug in slicing on a :class:`DatetimeIndex` with a partial-timestamp dropping high-resolution indices near the end of a year, quarter, or month (:issue:`31064`) - Bug in :meth:`PeriodIndex.get_loc` treating higher-resolution strings differently from :meth:`PeriodIndex.get_value` (:issue:`31172`) - Bug in :meth:`Series.at` and :meth:`DataFrame.at` not matching ``.loc`` behavior when looking up an integer in a :class:`Float64Index` (:issue:`31329`) @@ -996,6 +985,8 @@ Missing MultiIndex ^^^^^^^^^^ + +- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`. Previously an ``AttributeError`` was raised (:issue:`31126`) - Bug in :meth:`Dataframe.loc` when used with a :class:`MultiIndex`. The returned values were not in the same order as the given inputs (:issue:`22797`) .. ipython:: python @@ -1057,6 +1048,7 @@ I/O - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`) - Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`) - Bug in "meth"`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`) +- :func:`read_excel` no longer takes ``**kwds`` arguments. 
This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) Plotting ^^^^^^^^ @@ -1072,6 +1064,8 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ +- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`) +- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`) now raise a ``TypeError`` if a not-accepted keyword argument is passed into it. Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`) - Bug in :meth:`GroupBy.apply` raises ``ValueError`` when the ``by`` axis is not sorted and has duplicates and the applied ``func`` does not mutate passed in objects (:issue:`30667`) - Bug in :meth:`DataFrameGroupby.transform` produces incorrect result with transformation functions (:issue:`30918`) - Bug in :meth:`Groupby.transform` was returning the wrong result when grouping by multiple keys of which some were categorical and others not (:issue:`32494`) @@ -1153,6 +1147,9 @@ ExtensionArray Other ^^^^^ + +- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). 
+- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) - Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True`` instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`) - Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`)
xref https://github.com/pandas-dev/pandas/issues/34801
https://api.github.com/repos/pandas-dev/pandas/pulls/35273
2020-07-14T12:35:38Z
2020-07-14T17:09:42Z
2020-07-14T17:09:42Z
2020-07-14T17:10:00Z
CI: Unpin pytest
diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 2dc53f8181ac4..15704cf0d5427 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -23,4 +23,4 @@ dependencies: - pip - pip: - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index d31015fde4741..a9b9a5a47ccf5 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index 23121b985492e..c086b3651afc3 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-36-slow.yaml b/ci/deps/azure-36-slow.yaml index 0a6d1d13c8549..87bad59fa4873 100644 --- a/ci/deps/azure-36-slow.yaml +++ b/ci/deps/azure-36-slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 4dbb6a5344976..6f64c81f299d1 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 451fb5884a4af..5cb58756a6ac1 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - 
hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index 81a27465f9e61..eeea249a19ca1 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.6.* # tools - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 4d7e1d821037b..548660cabaa67 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 34fca631df6c1..5bbd0e2795d7e 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 5f5ea8034cddf..177e0d3f4c0af 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-cov # this is only needed in the coverage build diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 6bc4aba733ee5..03a1e751b6a86 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index f434a03609b26..5cb53489be225 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml 
@@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.13 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index aaf706d61fe5c..e896233aac63c 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index ac39a223cd086..b879c0f81dab2 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/environment.yml b/environment.yml index 53222624619de..3b088ca511be9 100644 --- a/environment.yml +++ b/environment.yml @@ -52,7 +52,7 @@ dependencies: - botocore>=1.11 - hypothesis>=3.82 - moto # mock S3 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-cov - pytest-xdist>=1.21 - pytest-asyncio diff --git a/pandas/_testing.py b/pandas/_testing.py index fc6df7a95e348..1cf9304ed2715 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -9,7 +9,7 @@ from shutil import rmtree import string import tempfile -from typing import Any, Callable, List, Optional, Type, Union, cast +from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast import warnings import zipfile @@ -2880,9 +2880,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]): return expected -def external_error_raised( - expected_exception: Type[Exception], -) -> Callable[[Type[Exception], None], None]: +def external_error_raised(expected_exception: Type[Exception],) -> ContextManager: """ Helper function to mark pytest.raises that have an external error message. 
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 7e4513da37dc9..0d447a70b540d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1294,9 +1294,7 @@ def test_get_nonexistent_category(): ) -def test_series_groupby_on_2_categoricals_unobserved( - reduction_func: str, observed: bool, request -): +def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request): # GH 17605 if reduction_func == "ngroup": pytest.skip("ngroup is not truly a reduction") @@ -1326,7 +1324,7 @@ def test_series_groupby_on_2_categoricals_unobserved( def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( - reduction_func: str, request + reduction_func, request ): # GH 17605 # Tests whether the unobserved categories in the result contain 0 or NaN @@ -1374,7 +1372,7 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( assert np.issubdtype(result.dtype, np.integer) -def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func: str): +def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func): # GH 23865 # GH 27075 # Ensure that df.groupby, when 'by' is two pd.Categorical variables, @@ -1402,7 +1400,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_fun @pytest.mark.parametrize("observed", [False, None]) def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( - reduction_func: str, observed: bool, request + reduction_func, observed, request ): # GH 23865 # GH 27075 diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index a4a1d83177c50..bdf633839b2cd 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -120,7 +120,9 @@ def _skip_if_no_scipy() -> bool: ) -def skip_if_installed(package: str) -> Callable: +# TODO: return type, _pytest.mark.structures.MarkDecorator is not public +# 
https://github.com/pytest-dev/pytest/issues/7469 +def skip_if_installed(package: str): """ Skip a test if a package is installed. @@ -134,7 +136,9 @@ def skip_if_installed(package: str) -> Callable: ) -def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable: +# TODO: return type, _pytest.mark.structures.MarkDecorator is not public +# https://github.com/pytest-dev/pytest/issues/7469 +def skip_if_no(package: str, min_version: Optional[str] = None): """ Generic function to help skip tests when required packages are not present on the testing system. @@ -196,14 +200,12 @@ def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable: ) -def skip_if_np_lt( - ver_str: str, reason: Optional[str] = None, *args, **kwds -) -> Callable: +# TODO: return type, _pytest.mark.structures.MarkDecorator is not public +# https://github.com/pytest-dev/pytest/issues/7469 +def skip_if_np_lt(ver_str: str, *args, reason: Optional[str] = None): if reason is None: reason = f"NumPy {ver_str} or greater required" - return pytest.mark.skipif( - _np_version < LooseVersion(ver_str), reason=reason, *args, **kwds - ) + return pytest.mark.skipif(_np_version < LooseVersion(ver_str), *args, reason=reason) def parametrize_fixture_doc(*args): diff --git a/requirements-dev.txt b/requirements-dev.txt index 0c024d1b54637..7bf3df176b378 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,7 +32,7 @@ boto3 botocore>=1.11 hypothesis>=3.82 moto -pytest>=5.0.1,<6.0.0rc0 +pytest>=5.0.1 pytest-cov pytest-xdist>=1.21 pytest-asyncio diff --git a/setup.cfg b/setup.cfg index 00af7f6f1b79a..ee5725e36d193 100644 --- a/setup.cfg +++ b/setup.cfg @@ -105,7 +105,7 @@ known_dtypes = pandas.core.dtypes known_post_core = pandas.tseries,pandas.io,pandas.plotting sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER known_first_party = pandas -known_third_party = 
_pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf +known_third_party = announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0
- [ ] closes #35261 have made a start working through the failures. we should be able to get mypy to green here and then when pytest 6.0.0 is released the upstream fixes should cause ci to go red since we have `warn_unused_ignores = True` in setup.cfg.
https://api.github.com/repos/pandas-dev/pandas/pulls/35272
2020-07-14T08:35:44Z
2020-07-29T15:59:29Z
2020-07-29T15:59:28Z
2020-07-30T12:20:14Z
REGR: revert ExtensionBlock.set to be in-place
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 55e2a810e6fc3..0e16340ed58e3 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -999,7 +999,6 @@ Indexing - Bug in :meth:`Series.__getitem__` indexing with non-standard scalars, e.g. ``np.dtype`` (:issue:`32684`) - Bug in :class:`Index` constructor where an unhelpful error message was raised for ``numpy`` scalars (:issue:`33017`) - Bug in :meth:`DataFrame.lookup` incorrectly raising an ``AttributeError`` when ``frame.index`` or ``frame.columns`` is not unique; this will now raise a ``ValueError`` with a helpful error message (:issue:`33041`) -- Bug in :meth:`DataFrame.iloc.__setitem__` creating a new array instead of overwriting ``Categorical`` values in-place (:issue:`32831`) - Bug in :class:`Interval` where a :class:`Timedelta` could not be added or subtracted from a :class:`Timestamp` interval (:issue:`32023`) - Bug in :meth:`DataFrame.copy` _item_cache not invalidated after copy causes post-copy value updates to not be reflected (:issue:`31784`) - Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index cc0f09ced7399..6ca6eca1ff829 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1589,7 +1589,7 @@ def should_store(self, value: ArrayLike) -> bool: def set(self, locs, values): assert locs.tolist() == [0] - self.values[:] = values + self.values = values def putmask( self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False, diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index c5f40102874dd..4fae01ec710fd 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -694,6 +694,7 @@ def test_series_indexing_zerodim_np_array(self): result = 
s.iloc[np.array(0)] assert result == 1 + @pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/33457") def test_iloc_setitem_categorical_updates_inplace(self): # Mixed dtype ensures we go through take_split_path in setitem_with_indexer cat = pd.Categorical(["A", "B", "C"]) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index ced70069dd955..5b7f013d5de31 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1100,3 +1100,13 @@ def test_long_text_missing_labels_inside_loc_error_message_limited(): error_message_regex = "long_missing_label_text_0.*\\\\n.*long_missing_label_text_1" with pytest.raises(KeyError, match=error_message_regex): s.loc[["a", "c"] + missing_labels] + + +def test_setitem_categorical(): + # https://github.com/pandas-dev/pandas/issues/35369 + df = pd.DataFrame({"h": pd.Series(list("mn")).astype("category")}) + df.h = df.h.cat.reorder_categories(["n", "m"]) + expected = pd.DataFrame( + {"h": pd.Categorical(["m", "n"]).reorder_categories(["n", "m"])} + ) + tm.assert_frame_equal(df, expected)
Alternative for https://github.com/pandas-dev/pandas/pull/35266 and closes #35369
https://api.github.com/repos/pandas-dev/pandas/pulls/35271
2020-07-14T08:01:29Z
2020-07-27T18:00:06Z
2020-07-27T18:00:06Z
2020-08-18T13:40:30Z
CI: Ignore setuptools distutils warning
diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 15704cf0d5427..2dc53f8181ac4 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -23,4 +23,4 @@ dependencies: - pip - pip: - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index a9b9a5a47ccf5..d31015fde4741 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index c086b3651afc3..23121b985492e 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml index f5af7bcf36189..9f66f82720b5b 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-36-minimum_versions.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython=0.29.16 - - pytest=5.0.1 + - pytest>=5.0.1, <6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-36-slow.yaml b/ci/deps/azure-36-slow.yaml index 87bad59fa4873..0a6d1d13c8549 100644 --- a/ci/deps/azure-36-slow.yaml +++ b/ci/deps/azure-36-slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 81e336cf1ed7f..714e1100b1e1a 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - 
pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 5cb58756a6ac1..451fb5884a4af 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index eeea249a19ca1..81a27465f9e61 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.6.* # tools - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 548660cabaa67..4d7e1d821037b 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 5bbd0e2795d7e..34fca631df6c1 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 177e0d3f4c0af..5f5ea8034cddf 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-cov # this is only needed in the coverage build diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 03a1e751b6a86..6bc4aba733ee5 100644 --- 
a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index 5cb53489be225..f434a03609b26 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.13 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index e896233aac63c..aaf706d61fe5c 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index b879c0f81dab2..ac39a223cd086 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/environment.yml b/environment.yml index 80dbffebf6b9d..32ff8c91cb69c 100644 --- a/environment.yml +++ b/environment.yml @@ -51,7 +51,7 @@ dependencies: - botocore>=1.11 - hypothesis>=3.82 - moto # mock S3 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-cov - pytest-xdist>=1.21 - pytest-asyncio diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index e36ea662fac8b..04e841c05e44a 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -21,6 +21,10 @@ # pandas_datareader "ignore:pandas.util.testing is deprecated:FutureWarning" ) +@pytest.mark.filterwarnings( + # https://github.com/pandas-dev/pandas/issues/35252 + "ignore:Distutils:UserWarning" +) def test_show_versions(capsys): # gh-32041 pd.show_versions() diff --git 
a/requirements-dev.txt b/requirements-dev.txt index 886f400caf44f..3cda38d4b72f5 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,7 +32,7 @@ boto3 botocore>=1.11 hypothesis>=3.82 moto -pytest>=5.0.1 +pytest>=5.0.1,<6.0.0rc0 pytest-cov pytest-xdist>=1.21 pytest-asyncio
xref https://github.com/pandas-dev/pandas/issues/35252. Just ignoring in the test. Can discuss a proper solution in the issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/35260
2020-07-13T12:12:36Z
2020-07-13T15:27:00Z
2020-07-13T15:27:00Z
2020-07-14T13:15:47Z
ENH: Basis for a StringDtype using Arrow
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f6d2d6e63340f..1ca18bae4e2c4 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -636,7 +636,7 @@ cpdef ndarray[object] ensure_string_array( ---------- arr : array-like The values to be converted to str, if needed. - na_value : Any + na_value : Any, default np.nan The value to use for na. For example, np.nan or pd.NA. convert_na_value : bool, default True If False, existing na values will be used unchanged in the new array. diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 0968545a6b8a4..6884d03f9c5aa 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -468,6 +468,7 @@ def astype(self, dtype, copy=True): NumPy ndarray with 'dtype' for its dtype. """ from pandas.core.arrays.string_ import StringDtype + from pandas.core.arrays.string_arrow import ArrowStringDtype dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, self.dtype): @@ -475,7 +476,11 @@ def astype(self, dtype, copy=True): return self else: return self.copy() - if isinstance(dtype, StringDtype): # allow conversion to StringArrays + + # FIXME: Really hard-code here? 
+ if isinstance( + dtype, (ArrowStringDtype, StringDtype) + ): # allow conversion to StringArrays return dtype.construct_array_type()._from_sequence(self, copy=False) return np.array(self, dtype=dtype, copy=copy) diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py new file mode 100644 index 0000000000000..184fbc050036b --- /dev/null +++ b/pandas/core/arrays/string_arrow.py @@ -0,0 +1,625 @@ +from __future__ import annotations + +from distutils.version import LooseVersion +from typing import TYPE_CHECKING, Any, Sequence, Type, Union + +import numpy as np + +from pandas._libs import lib, missing as libmissing +from pandas.util._validators import validate_fillna_kwargs + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.dtypes import register_extension_dtype +from pandas.core.dtypes.missing import isna + +from pandas.api.types import ( + is_array_like, + is_bool_dtype, + is_integer, + is_integer_dtype, + is_scalar, +) +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays.base import ExtensionArray +from pandas.core.indexers import check_array_indexer, validate_indices +from pandas.core.missing import get_fill_func + +try: + import pyarrow as pa +except ImportError: + pa = None +else: + # our min supported version of pyarrow, 0.15.1, does not have a compute + # module + try: + import pyarrow.compute as pc + except ImportError: + pass + else: + ARROW_CMP_FUNCS = { + "eq": pc.equal, + "ne": pc.not_equal, + "lt": pc.less, + "gt": pc.greater, + "le": pc.less_equal, + "ge": pc.greater_equal, + } + + +if TYPE_CHECKING: + from pandas import Series + + +@register_extension_dtype +class ArrowStringDtype(ExtensionDtype): + """ + Extension dtype for string data in a ``pyarrow.ChunkedArray``. + + .. versionadded:: 1.2.0 + + .. warning:: + + ArrowStringDtype is considered experimental. The implementation and + parts of the API may change without warning. 
+ + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> from pandas.core.arrays.string_arrow import ArrowStringDtype + >>> ArrowStringDtype() + ArrowStringDtype + """ + + name = "arrow_string" + + #: StringDtype.na_value uses pandas.NA + na_value = libmissing.NA + + @property + def type(self) -> Type[str]: + return str + + @classmethod + def construct_array_type(cls) -> Type["ArrowStringArray"]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return ArrowStringArray + + def __hash__(self) -> int: + return hash("ArrowStringDtype") + + def __repr__(self) -> str: + return "ArrowStringDtype" + + def __from_arrow__( + self, array: Union["pa.Array", "pa.ChunkedArray"] + ) -> "ArrowStringArray": + """ + Construct StringArray from pyarrow Array/ChunkedArray. + """ + return ArrowStringArray(array) + + def __eq__(self, other) -> bool: + """Check whether 'other' is equal to self. + + By default, 'other' is considered equal if + * it's a string matching 'self.name'. + * it's an instance of this type. + + Parameters + ---------- + other : Any + + Returns + ------- + bool + """ + if isinstance(other, ArrowStringDtype): + return True + elif isinstance(other, str) and other == "arrow_string": + return True + else: + return False + + +class ArrowStringArray(OpsMixin, ExtensionArray): + """ + Extension array for string data in a ``pyarrow.ChunkedArray``. + + .. versionadded:: 1.2.0 + + .. warning:: + + ArrowStringArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : pyarrow.Array or pyarrow.ChunkedArray + The array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + array + The recommended function for creating a ArrowStringArray. + Series.str + The string methods are available on Series backed by + a ArrowStringArray. 
+ + Notes + ----- + ArrowStringArray returns a BooleanArray for comparison methods. + + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="arrow_string") + <ArrowStringArray> + ['This is', 'some text', <NA>, 'data.'] + Length: 4, dtype: arrow_string + """ + + _dtype = ArrowStringDtype() + + def __init__(self, values): + self._chk_pyarrow_available() + if isinstance(values, pa.Array): + self._data = pa.chunked_array([values]) + elif isinstance(values, pa.ChunkedArray): + self._data = values + else: + raise ValueError(f"Unsupported type '{type(values)}' for ArrowStringArray") + + if not pa.types.is_string(self._data.type): + raise ValueError( + "ArrowStringArray requires a PyArrow (chunked) array of string type" + ) + + @classmethod + def _chk_pyarrow_available(cls) -> None: + # TODO: maybe update import_optional_dependency to allow a minimum + # version to be specified rather than use the global minimum + if pa is None or LooseVersion(pa.__version__) < "1.0.0": + msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray." + raise ImportError(msg) + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + cls._chk_pyarrow_available() + # convert non-na-likes to str, and nan-likes to ArrowStringDtype.na_value + scalars = lib.ensure_string_array(scalars, copy=False) + return cls(pa.array(scalars, type=pa.string(), from_pandas=True)) + + @classmethod + def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @property + def dtype(self) -> ArrowStringDtype: + """ + An instance of 'ArrowStringDtype'. 
+ """ + return self._dtype + + def __array__(self, dtype=None) -> np.ndarray: + """Correctly construct numpy arrays when passed to `np.asarray()`.""" + return self.to_numpy(dtype=dtype) + + def __arrow_array__(self, type=None): + """Convert myself to a pyarrow Array or ChunkedArray.""" + return self._data + + def to_numpy( + self, dtype=None, copy: bool = False, na_value=lib.no_default + ) -> np.ndarray: + """ + Convert to a NumPy ndarray. + """ + # TODO: copy argument is ignored + + if na_value is lib.no_default: + na_value = self._dtype.na_value + result = self._data.__array__(dtype=dtype) + result[isna(result)] = na_value + return result + + def __len__(self) -> int: + """ + Length of this array. + + Returns + ------- + length : int + """ + return len(self._data) + + @classmethod + def _from_factorized(cls, values, original): + return cls._from_sequence(values) + + @classmethod + def _concat_same_type(cls, to_concat) -> ArrowStringArray: + """ + Concatenate multiple ArrowStringArray. + + Parameters + ---------- + to_concat : sequence of ArrowStringArray + + Returns + ------- + ArrowStringArray + """ + return cls( + pa.chunked_array( + [array for ea in to_concat for array in ea._data.iterchunks()] + ) + ) + + def __getitem__(self, item: Any) -> Any: + """Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. 
+ """ + item = check_array_indexer(self, item) + + if isinstance(item, np.ndarray): + if not len(item): + return type(self)(pa.chunked_array([], type=pa.string())) + elif is_integer_dtype(item.dtype): + return self.take(item) + elif is_bool_dtype(item.dtype): + return type(self)(self._data.filter(item)) + else: + raise IndexError( + "Only integers, slices and integer or " + "boolean arrays are valid indices." + ) + + # We are not an array indexer, so maybe e.g. a slice or integer + # indexer. We dispatch to pyarrow. + value = self._data[item] + if isinstance(value, pa.ChunkedArray): + return type(self)(value) + else: + return self._as_pandas_scalar(value) + + def _as_pandas_scalar(self, arrow_scalar: pa.Scalar): + scalar = arrow_scalar.as_py() + if scalar is None: + return self._dtype.na_value + else: + return scalar + + def fillna(self, value=None, method=None, limit=None): + """ + Fill NA/NaN values using the specified method. + + Parameters + ---------- + value : scalar, array-like + If a scalar value is passed it is used to fill all missing values. + Alternatively, an array-like 'value' can be given. It's expected + that the array-like have the same length as 'self'. + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. + + Returns + ------- + ExtensionArray + With NA/NaN filled. 
+ """ + value, method = validate_fillna_kwargs(value, method) + + mask = self.isna() + + if is_array_like(value): + if len(value) != len(self): + raise ValueError( + f"Length of 'value' does not match. Got ({len(value)}) " + f"expected {len(self)}" + ) + value = value[mask] + + if mask.any(): + if method is not None: + func = get_fill_func(method) + new_values = func(self.to_numpy(object), limit=limit, mask=mask) + new_values = self._from_sequence(new_values) + else: + # fill with value + new_values = self.copy() + new_values[mask] = value + else: + new_values = self.copy() + return new_values + + def _reduce(self, name, skipna=True, **kwargs): + if name in ["min", "max"]: + return getattr(self, name)(skipna=skipna) + + raise TypeError(f"Cannot perform reduction '{name}' with string dtype") + + @property + def nbytes(self) -> int: + """ + The number of bytes needed to store this object in memory. + """ + return self._data.nbytes + + def isna(self) -> np.ndarray: + """ + Boolean NumPy array indicating if each value is missing. + + This should return a 1-D array the same length as 'self'. + """ + # TODO: Implement .to_numpy for ChunkedArray + return self._data.is_null().to_pandas().values + + def copy(self) -> ArrowStringArray: + """ + Return a shallow copy of the array. 
+ + Returns + ------- + ArrowStringArray + """ + return type(self)(self._data) + + def _cmp_method(self, other, op): + from pandas.arrays import BooleanArray + + pc_func = ARROW_CMP_FUNCS[op.__name__] + if isinstance(other, ArrowStringArray): + result = pc_func(self._data, other._data) + elif isinstance(other, np.ndarray): + result = pc_func(self._data, other) + elif is_scalar(other): + try: + result = pc_func(self._data, pa.scalar(other)) + except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid): + mask = isna(self) | isna(other) + valid = ~mask + result = np.zeros(len(self), dtype="bool") + result[valid] = op(np.array(self)[valid], other) + return BooleanArray(result, mask) + else: + return NotImplemented + + # TODO(ARROW-9429): Add a .to_numpy() to ChunkedArray + return BooleanArray._from_sequence(result.to_pandas().values) + + def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: + """Set one or more values inplace. + + Parameters + ---------- + key : int, ndarray, or slice + When called from, e.g. ``Series.__setitem__``, ``key`` will be + one of + + * scalar int + * ndarray of integers. + * boolean ndarray + * slice object + + value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object + value or values to be set of ``key``. + + Returns + ------- + None + """ + key = check_array_indexer(self, key) + + if is_integer(key): + if not is_scalar(value): + raise ValueError("Must pass scalars with scalar indexer") + elif isna(value): + value = None + elif not isinstance(value, str): + raise ValueError("Scalar must be NA or str") + + # Slice data and insert inbetween + new_data = [ + *self._data[0:key].chunks, + pa.array([value], type=pa.string()), + *self._data[(key + 1) :].chunks, + ] + self._data = pa.chunked_array(new_data) + else: + # Convert to integer indices and iteratively assign. + # TODO: Make a faster variant of this in Arrow upstream. + # This is probably extremely slow. 
+ + # Convert all possible input key types to an array of integers + if is_bool_dtype(key): + # TODO(ARROW-9430): Directly support setitem(booleans) + key_array = np.argwhere(key).flatten() + elif isinstance(key, slice): + key_array = np.array(range(len(self))[key]) + else: + # TODO(ARROW-9431): Directly support setitem(integers) + key_array = np.asanyarray(key) + + if is_scalar(value): + value = np.broadcast_to(value, len(key_array)) + else: + value = np.asarray(value) + + if len(key_array) != len(value): + raise ValueError("Length of indexer and values mismatch") + + for k, v in zip(key_array, value): + self[k] = v + + def take( + self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None + ) -> "ExtensionArray": + """ + Take elements from an array. + + Parameters + ---------- + indices : sequence of int + Indices to be taken. + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. + + fill_value : any, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + Returns + ------- + ExtensionArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. 
+ + See Also + -------- + numpy.take + api.extensions.take + + Notes + ----- + ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, + ``iloc``, when `indices` is a sequence of values. Additionally, + it's called by :meth:`Series.reindex`, or any other method + that causes realignment, with a `fill_value`. + """ + # TODO: Remove once we got rid of the (indices < 0) check + if not is_array_like(indices): + indices_array = np.asanyarray(indices) + else: + indices_array = indices + + if len(self._data) == 0 and (indices_array >= 0).any(): + raise IndexError("cannot do a non-empty take") + if indices_array.size > 0 and indices_array.max() >= len(self._data): + raise IndexError("out of bounds value in 'indices'.") + + if allow_fill: + fill_mask = indices_array < 0 + if fill_mask.any(): + validate_indices(indices_array, len(self._data)) + # TODO(ARROW-9433): Treat negative indices as NULL + indices_array = pa.array(indices_array, mask=fill_mask) + result = self._data.take(indices_array) + if isna(fill_value): + return type(self)(result) + # TODO: ArrowNotImplementedError: Function fill_null has no + # kernel matching input types (array[string], scalar[string]) + result = type(self)(result) + result[fill_mask] = fill_value + return result + # return type(self)(pc.fill_null(result, pa.scalar(fill_value))) + else: + # Nothing to fill + return type(self)(self._data.take(indices)) + else: # allow_fill=False + # TODO(ARROW-9432): Treat negative indices as indices from the right. + if (indices_array < 0).any(): + # Don't modify in-place + indices_array = np.copy(indices_array) + indices_array[indices_array < 0] += len(self._data) + return type(self)(self._data.take(indices_array)) + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of each unique value. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of missing values. 
+ + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + from pandas import Index, Series + + vc = self._data.value_counts() + + # Index cannot hold ExtensionArrays yet + index = Index(type(self)(vc.field(0)).astype(object)) + # No missings, so we can adhere to the interface and return a numpy array. + counts = np.array(vc.field(1)) + + if dropna and self._data.null_count > 0: + raise NotImplementedError("yo") + + return Series(counts, index=index).astype("Int64") diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 9758eae60c262..465ec821400e7 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -385,13 +385,17 @@ def maybe_cast_to_extension_array( ExtensionArray or obj """ from pandas.core.arrays.string_ import StringArray + from pandas.core.arrays.string_arrow import ArrowStringArray assert isinstance(cls, type), f"must pass a type: {cls}" assertion_msg = f"must pass a subclass of ExtensionArray: {cls}" assert issubclass(cls, ABCExtensionArray), assertion_msg # Everything can be be converted to StringArrays, but we may not want to convert - if issubclass(cls, StringArray) and lib.infer_dtype(obj) != "string": + if ( + issubclass(cls, (StringArray, ArrowStringArray)) + and lib.infer_dtype(obj) != "string" + ): return obj try: diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 089bbcf4e0e3f..07e9484994c26 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -7,10 +7,54 @@ import pandas as pd import pandas._testing as tm +from pandas.core.arrays.string_arrow import ArrowStringArray, ArrowStringDtype +skip_if_no_pyarrow = td.skip_if_no("pyarrow", min_version="1.0.0") -def test_repr(): - df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype="string")}) + +@pytest.fixture( + params=[ + # pandas\tests\arrays\string_\test_string.py:16: error: List item 1 has + # incompatible 
type "ParameterSet"; expected + # "Sequence[Collection[object]]" [list-item] + "string", + pytest.param( + "arrow_string", marks=skip_if_no_pyarrow + ), # type:ignore[list-item] + ] +) +def dtype(request): + return request.param + + +@pytest.fixture +def dtype_object(dtype): + if dtype == "string": + return pd.StringDtype + else: + return ArrowStringDtype + + +@pytest.fixture( + params=[ + pd.arrays.StringArray, + pytest.param(ArrowStringArray, marks=skip_if_no_pyarrow), + ] +) +def cls(request): + return request.param + + +def test_repr(dtype, request): + if dtype == "arrow_string": + reason = ( + "AssertionError: assert ' A\n0 a\n1 None\n2 b' " + "== ' A\n0 a\n1 <NA>\n2 b'" + ) + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype=dtype)}) expected = " A\n0 a\n1 <NA>\n2 b" assert repr(df) == expected @@ -21,27 +65,36 @@ def test_repr(): assert repr(df.A.array) == expected -def test_none_to_nan(): - a = pd.arrays.StringArray._from_sequence(["a", None, "b"]) +def test_none_to_nan(cls): + a = cls._from_sequence(["a", None, "b"]) assert a[1] is not None assert a[1] is pd.NA -def test_setitem_validates(): - a = pd.arrays.StringArray._from_sequence(["a", "b"]) - with pytest.raises(ValueError, match="10"): - a[0] = 10 +def test_setitem_validates(cls): + arr = cls._from_sequence(["a", "b"]) - with pytest.raises(ValueError, match="strings"): - a[:] = np.array([1, 2]) + if cls is pd.arrays.StringArray: + msg = "Cannot set non-string value '10' into a StringArray." + else: + msg = "Scalar must be NA or str" + with pytest.raises(ValueError, match=msg): + arr[0] = 10 + if cls is pd.arrays.StringArray: + msg = "Must provide strings." 
+ else: + msg = "Scalar must be NA or str" + with pytest.raises(ValueError, match=msg): + arr[:] = np.array([1, 2]) -def test_setitem_with_scalar_string(): + +def test_setitem_with_scalar_string(dtype): # is_float_dtype considers some strings, like 'd', to be floats # which can cause issues. - arr = pd.array(["a", "c"], dtype="string") + arr = pd.array(["a", "c"], dtype=dtype) arr[0] = "d" - expected = pd.array(["d", "c"], dtype="string") + expected = pd.array(["d", "c"], dtype=dtype) tm.assert_extension_array_equal(arr, expected) @@ -53,46 +106,69 @@ def test_setitem_with_scalar_string(): (["a b", "a bc. de"], operator.methodcaller("capitalize")), ], ) -def test_string_methods(input, method): - a = pd.Series(input, dtype="string") +def test_string_methods(input, method, dtype, request): + if dtype == "arrow_string": + reason = "AttributeError: 'ArrowStringDtype' object has no attribute 'base'" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + a = pd.Series(input, dtype=dtype) b = pd.Series(input, dtype="object") result = method(a.str) expected = method(b.str) - assert result.dtype.name == "string" + assert result.dtype.name == dtype tm.assert_series_equal(result.astype(object), expected) -def test_astype_roundtrip(): +def test_astype_roundtrip(dtype, request): + if dtype == "arrow_string": + reason = "ValueError: Could not convert object to NumPy datetime" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + s = pd.Series(pd.date_range("2000", periods=12)) s[0] = None - result = s.astype("string").astype("datetime64[ns]") + result = s.astype(dtype).astype("datetime64[ns]") tm.assert_series_equal(result, s) -def test_add(): - a = pd.Series(["a", "b", "c", None, None], dtype="string") - b = pd.Series(["x", "y", None, "z", None], dtype="string") +def test_add(dtype, request): + if dtype == "arrow_string": + reason = ( + "TypeError: unsupported operand type(s) for +: 'ArrowStringArray' and " + "'ArrowStringArray'" + ) 
+ mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + a = pd.Series(["a", "b", "c", None, None], dtype=dtype) + b = pd.Series(["x", "y", None, "z", None], dtype=dtype) result = a + b - expected = pd.Series(["ax", "by", None, None, None], dtype="string") + expected = pd.Series(["ax", "by", None, None, None], dtype=dtype) tm.assert_series_equal(result, expected) result = a.add(b) tm.assert_series_equal(result, expected) result = a.radd(b) - expected = pd.Series(["xa", "yb", None, None, None], dtype="string") + expected = pd.Series(["xa", "yb", None, None, None], dtype=dtype) tm.assert_series_equal(result, expected) result = a.add(b, fill_value="-") - expected = pd.Series(["ax", "by", "c-", "-z", None], dtype="string") + expected = pd.Series(["ax", "by", "c-", "-z", None], dtype=dtype) tm.assert_series_equal(result, expected) -def test_add_2d(): - a = pd.array(["a", "b", "c"], dtype="string") +def test_add_2d(dtype, request): + if dtype == "arrow_string": + reason = "Failed: DID NOT RAISE <class 'ValueError'>" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + a = pd.array(["a", "b", "c"], dtype=dtype) b = np.array([["a", "b", "c"]], dtype=object) with pytest.raises(ValueError, match="3 != 1"): a + b @@ -102,23 +178,38 @@ def test_add_2d(): s + b -def test_add_sequence(): - a = pd.array(["a", "b", None, None], dtype="string") +def test_add_sequence(dtype, request): + if dtype == "arrow_string": + reason = ( + "TypeError: unsupported operand type(s) for +: 'ArrowStringArray' " + "and 'list'" + ) + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + a = pd.array(["a", "b", None, None], dtype=dtype) other = ["x", None, "y", None] result = a + other - expected = pd.array(["ax", None, None, None], dtype="string") + expected = pd.array(["ax", None, None, None], dtype=dtype) tm.assert_extension_array_equal(result, expected) result = other + a - expected = pd.array(["xa", None, None, None], 
dtype="string") + expected = pd.array(["xa", None, None, None], dtype=dtype) tm.assert_extension_array_equal(result, expected) -def test_mul(): - a = pd.array(["a", "b", None], dtype="string") +def test_mul(dtype, request): + if dtype == "arrow_string": + reason = ( + "TypeError: unsupported operand type(s) for *: 'ArrowStringArray' and 'int'" + ) + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + a = pd.array(["a", "b", None], dtype=dtype) result = a * 2 - expected = pd.array(["aa", "bb", None], dtype="string") + expected = pd.array(["aa", "bb", None], dtype=dtype) tm.assert_extension_array_equal(result, expected) result = 2 * a @@ -126,55 +217,83 @@ def test_mul(): @pytest.mark.xfail(reason="GH-28527") -def test_add_strings(): - array = pd.array(["a", "b", "c", "d"], dtype="string") +def test_add_strings(dtype): + array = pd.array(["a", "b", "c", "d"], dtype=dtype) df = pd.DataFrame([["t", "u", "v", "w"]]) assert array.__add__(df) is NotImplemented result = array + df - expected = pd.DataFrame([["at", "bu", "cv", "dw"]]).astype("string") + expected = pd.DataFrame([["at", "bu", "cv", "dw"]]).astype(dtype) tm.assert_frame_equal(result, expected) result = df + array - expected = pd.DataFrame([["ta", "ub", "vc", "wd"]]).astype("string") + expected = pd.DataFrame([["ta", "ub", "vc", "wd"]]).astype(dtype) tm.assert_frame_equal(result, expected) @pytest.mark.xfail(reason="GH-28527") -def test_add_frame(): - array = pd.array(["a", "b", np.nan, np.nan], dtype="string") +def test_add_frame(dtype): + array = pd.array(["a", "b", np.nan, np.nan], dtype=dtype) df = pd.DataFrame([["x", np.nan, "y", np.nan]]) assert array.__add__(df) is NotImplemented result = array + df - expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype("string") + expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype(dtype) tm.assert_frame_equal(result, expected) result = df + array - expected = pd.DataFrame([["xa", np.nan, np.nan, 
np.nan]]).astype("string") + expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype(dtype) tm.assert_frame_equal(result, expected) -def test_comparison_methods_scalar(all_compare_operators): +def test_comparison_methods_scalar(all_compare_operators, dtype): op_name = all_compare_operators - - a = pd.array(["a", None, "c"], dtype="string") + a = pd.array(["a", None, "c"], dtype=dtype) other = "a" result = getattr(a, op_name)(other) expected = np.array([getattr(item, op_name)(other) for item in a], dtype=object) expected = pd.array(expected, dtype="boolean") tm.assert_extension_array_equal(result, expected) + +def test_comparison_methods_scalar_pd_na(all_compare_operators, dtype): + op_name = all_compare_operators + a = pd.array(["a", None, "c"], dtype=dtype) result = getattr(a, op_name)(pd.NA) expected = pd.array([None, None, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) -def test_comparison_methods_array(all_compare_operators): +def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, request): + if all_compare_operators not in ["__eq__", "__ne__"]: + reason = "comparison op not supported between instances of 'str' and 'int'" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + op_name = all_compare_operators + a = pd.array(["a", None, "c"], dtype=dtype) + other = 42 + result = getattr(a, op_name)(other) + expected_data = {"__eq__": [False, None, False], "__ne__": [True, None, True]}[ + op_name + ] + expected = pd.array(expected_data, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_array(all_compare_operators, dtype, request): + if dtype == "arrow_string": + if all_compare_operators in ["__eq__", "__ne__"]: + reason = "NotImplementedError: Neither scalar nor ArrowStringArray" + else: + reason = "AssertionError: left is not an ExtensionArray" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + op_name = 
all_compare_operators - a = pd.array(["a", None, "c"], dtype="string") + a = pd.array(["a", None, "c"], dtype=dtype) other = [None, None, "c"] result = getattr(a, op_name)(other) expected = np.empty_like(a, dtype="object") @@ -187,30 +306,46 @@ def test_comparison_methods_array(all_compare_operators): tm.assert_extension_array_equal(result, expected) -def test_constructor_raises(): - with pytest.raises(ValueError, match="sequence of strings"): - pd.arrays.StringArray(np.array(["a", "b"], dtype="S1")) +def test_constructor_raises(cls): + if cls is pd.arrays.StringArray: + msg = "StringArray requires a sequence of strings or pandas.NA" + else: + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray" + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", "b"], dtype="S1")) - with pytest.raises(ValueError, match="sequence of strings"): - pd.arrays.StringArray(np.array([])) + with pytest.raises(ValueError, match=msg): + cls(np.array([])) - with pytest.raises(ValueError, match="strings or pandas.NA"): - pd.arrays.StringArray(np.array(["a", np.nan], dtype=object)) + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.nan], dtype=object)) - with pytest.raises(ValueError, match="strings or pandas.NA"): - pd.arrays.StringArray(np.array(["a", None], dtype=object)) + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", None], dtype=object)) - with pytest.raises(ValueError, match="strings or pandas.NA"): - pd.arrays.StringArray(np.array(["a", pd.NaT], dtype=object)) + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", pd.NaT], dtype=object)) @pytest.mark.parametrize("copy", [True, False]) -def test_from_sequence_no_mutate(copy): +def test_from_sequence_no_mutate(copy, cls, request): + if cls is ArrowStringArray and copy is False: + reason = "AssertionError: numpy array are different" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + nan_arr = np.array(["a", np.nan], dtype=object) 
na_arr = np.array(["a", pd.NA], dtype=object) - result = pd.arrays.StringArray._from_sequence(nan_arr, copy=copy) - expected = pd.arrays.StringArray(na_arr) + result = cls._from_sequence(nan_arr, copy=copy) + + if cls is ArrowStringArray: + import pyarrow as pa + + expected = cls(pa.array(na_arr, type=pa.string(), from_pandas=True)) + else: + expected = cls(na_arr) tm.assert_extension_array_equal(result, expected) @@ -218,8 +353,13 @@ def test_from_sequence_no_mutate(copy): tm.assert_numpy_array_equal(nan_arr, expected) -def test_astype_int(): - arr = pd.array(["1", pd.NA, "3"], dtype="string") +def test_astype_int(dtype, request): + if dtype == "arrow_string": + reason = "TypeError: Cannot interpret 'Int64Dtype()' as a data type" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + arr = pd.array(["1", pd.NA, "3"], dtype=dtype) result = arr.astype("Int64") expected = pd.array([1, pd.NA, 3], dtype="Int64") @@ -228,16 +368,21 @@ def test_astype_int(): @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.xfail(reason="Not implemented StringArray.sum") -def test_reduce(skipna): - arr = pd.Series(["a", "b", "c"], dtype="string") +def test_reduce(skipna, dtype): + arr = pd.Series(["a", "b", "c"], dtype=dtype) result = arr.sum(skipna=skipna) assert result == "abc" @pytest.mark.parametrize("method", ["min", "max"]) @pytest.mark.parametrize("skipna", [True, False]) -def test_min_max(method, skipna): - arr = pd.Series(["a", "b", "c", None], dtype="string") +def test_min_max(method, skipna, dtype, request): + if dtype == "arrow_string": + reason = "AttributeError: 'ArrowStringArray' object has no attribute 'max'" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + arr = pd.Series(["a", "b", "c", None], dtype=dtype) result = getattr(arr, method)(skipna=skipna) if skipna: expected = "a" if method == "min" else "c" @@ -247,14 +392,20 @@ def test_min_max(method, skipna): @pytest.mark.parametrize("method", ["min", 
"max"]) -@pytest.mark.parametrize( - "arr", - [ - pd.Series(["a", "b", "c", None], dtype="string"), - pd.array(["a", "b", "c", None], dtype="string"), - ], -) -def test_min_max_numpy(method, arr): +@pytest.mark.parametrize("box", [pd.Series, pd.array]) +def test_min_max_numpy(method, box, dtype, request): + if dtype == "arrow_string": + if box is pd.array: + reason = ( + "TypeError: '<=' not supported between instances of 'str' and " + "'NoneType'" + ) + else: + reason = "AttributeError: 'ArrowStringArray' object has no attribute 'max'" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + arr = box(["a", "b", "c", None], dtype=dtype) result = getattr(np, method)(arr) expected = "a" if method == "min" else "c" assert result == expected @@ -262,8 +413,8 @@ def test_min_max_numpy(method, arr): @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.xfail(reason="Not implemented StringArray.sum") -def test_reduce_missing(skipna): - arr = pd.Series([None, "a", None, "b", "c", None], dtype="string") +def test_reduce_missing(skipna, dtype): + arr = pd.Series([None, "a", None, "b", "c", None], dtype=dtype) result = arr.sum(skipna=skipna) if skipna: assert result == "abc" @@ -272,34 +423,42 @@ def test_reduce_missing(skipna): @td.skip_if_no("pyarrow", min_version="0.15.0") -def test_arrow_array(): +def test_arrow_array(dtype): # protocol added in 0.15.0 import pyarrow as pa - data = pd.array(["a", "b", "c"], dtype="string") + data = pd.array(["a", "b", "c"], dtype=dtype) arr = pa.array(data) expected = pa.array(list(data), type=pa.string(), from_pandas=True) + if dtype == "arrow_string": + expected = pa.chunked_array(expected) + assert arr.equals(expected) @td.skip_if_no("pyarrow", min_version="0.15.1.dev") -def test_arrow_roundtrip(): +def test_arrow_roundtrip(dtype, dtype_object): # roundtrip possible from arrow 1.0.0 import pyarrow as pa - data = pd.array(["a", "b", None], dtype="string") + data = pd.array(["a", "b", None], dtype=dtype) df 
= pd.DataFrame({"a": data}) table = pa.table(df) assert table.field("a").type == "string" result = table.to_pandas() - assert isinstance(result["a"].dtype, pd.StringDtype) + assert isinstance(result["a"].dtype, dtype_object) tm.assert_frame_equal(result, df) # ensure the missing value is represented by NA and not np.nan or None assert result.loc[2, "a"] is pd.NA -def test_value_counts_na(): - arr = pd.array(["a", "b", "a", pd.NA], dtype="string") +def test_value_counts_na(dtype, request): + if dtype == "arrow_string": + reason = "TypeError: boolean value of NA is ambiguous" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype) result = arr.value_counts(dropna=False) expected = pd.Series([2, 1, 1], index=["a", pd.NA, "b"], dtype="Int64") tm.assert_series_equal(result, expected) @@ -312,12 +471,13 @@ def test_value_counts_na(): @pytest.mark.parametrize( "values, expected", [ - (pd.array(["a", "b", "c"]), np.array([False, False, False])), - (pd.array(["a", "b", None]), np.array([False, False, True])), + (["a", "b", "c"], np.array([False, False, False])), + (["a", "b", None], np.array([False, False, True])), ], ) -def test_use_inf_as_na(values, expected): +def test_use_inf_as_na(values, expected, dtype): # https://github.com/pandas-dev/pandas/issues/33655 + values = pd.array(values, dtype=dtype) with pd.option_context("mode.use_inf_as_na", True): result = values.isna() tm.assert_numpy_array_equal(result, expected) @@ -331,17 +491,36 @@ def test_use_inf_as_na(values, expected): tm.assert_frame_equal(result, expected) -def test_memory_usage(): +def test_memory_usage(dtype, request): # GH 33963 - series = pd.Series(["a", "b", "c"], dtype="string") + + if dtype == "arrow_string": + pytest.skip("not applicable") + + series = pd.Series(["a", "b", "c"], dtype=dtype) assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True) -@pytest.mark.parametrize("dtype", [np.float16, 
np.float32, np.float64]) -def test_astype_from_float_dtype(dtype): +@pytest.mark.parametrize("float_dtype", [np.float16, np.float32, np.float64]) +def test_astype_from_float_dtype(float_dtype, dtype): # https://github.com/pandas-dev/pandas/issues/36451 - s = pd.Series([0.1], dtype=dtype) - result = s.astype("string") - expected = pd.Series(["0.1"], dtype="string") + s = pd.Series([0.1], dtype=float_dtype) + result = s.astype(dtype) + expected = pd.Series(["0.1"], dtype=dtype) tm.assert_series_equal(result, expected) + + +def test_to_numpy_returns_pdna_default(dtype): + arr = pd.array(["a", pd.NA, "b"], dtype=dtype) + result = np.array(arr) + expected = np.array(["a", pd.NA, "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_na_value(dtype, nulls_fixture): + na_value = nulls_fixture + arr = pd.array(["a", pd.NA, "b"], dtype=dtype) + result = arr.to_numpy(na_value=na_value) + expected = np.array(["a", na_value, "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py new file mode 100644 index 0000000000000..ec7f57940a67f --- /dev/null +++ b/pandas/tests/arrays/string_/test_string_arrow.py @@ -0,0 +1,26 @@ +import re + +import numpy as np +import pytest + +from pandas.core.arrays.string_arrow import ArrowStringArray + +pa = pytest.importorskip("pyarrow", minversion="1.0.0") + + +@pytest.mark.parametrize("chunked", [True, False]) +@pytest.mark.parametrize("array", [np, pa]) +def test_constructor_not_string_type_raises(array, chunked): + arr = array.array([1, 2, 3]) + if chunked: + if array is np: + pytest.skip("chunked not applicable to numpy array") + arr = pa.chunked_array(arr) + if array is np: + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray" + else: + msg = re.escape( + "ArrowStringArray requires a PyArrow (chunked) array of string type" + ) + with pytest.raises(ValueError, 
match=msg): + ArrowStringArray(arr) diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py index 27a157d2127f6..db1940226e04e 100644 --- a/pandas/tests/extension/test_string.py +++ b/pandas/tests/extension/test_string.py @@ -3,39 +3,49 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd -from pandas.core.arrays.string_ import StringArray, StringDtype +from pandas.core.arrays.string_ import StringDtype +from pandas.core.arrays.string_arrow import ArrowStringDtype from pandas.tests.extension import base -@pytest.fixture -def dtype(): - return StringDtype() +@pytest.fixture( + params=[ + StringDtype, + pytest.param( + ArrowStringDtype, marks=td.skip_if_no("pyarrow", min_version="1.0.0") + ), + ] +) +def dtype(request): + return request.param() @pytest.fixture -def data(): +def data(dtype): strings = np.random.choice(list(string.ascii_letters), size=100) while strings[0] == strings[1]: strings = np.random.choice(list(string.ascii_letters), size=100) - return StringArray._from_sequence(strings) + return dtype.construct_array_type()._from_sequence(strings) @pytest.fixture -def data_missing(): +def data_missing(dtype): """Length 2 array with [NA, Valid]""" - return StringArray._from_sequence([pd.NA, "A"]) + return dtype.construct_array_type()._from_sequence([pd.NA, "A"]) @pytest.fixture -def data_for_sorting(): - return StringArray._from_sequence(["B", "C", "A"]) +def data_for_sorting(dtype): + return dtype.construct_array_type()._from_sequence(["B", "C", "A"]) @pytest.fixture -def data_missing_for_sorting(): - return StringArray._from_sequence(["B", pd.NA, "A"]) +def data_missing_for_sorting(dtype): + return dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"]) @pytest.fixture @@ -44,8 +54,10 @@ def na_value(): @pytest.fixture -def data_for_grouping(): - return StringArray._from_sequence(["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"]) +def data_for_grouping(dtype): + return 
dtype.construct_array_type()._from_sequence( + ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"] + ) class TestDtype(base.BaseDtypeTests): @@ -53,7 +65,11 @@ class TestDtype(base.BaseDtypeTests): class TestInterface(base.BaseInterfaceTests): - pass + def test_view(self, data, request): + if isinstance(data.dtype, ArrowStringDtype): + mark = pytest.mark.xfail(reason="not implemented") + request.node.add_marker(mark) + super().test_view(data) class TestConstructors(base.BaseConstructorsTests): @@ -61,7 +77,11 @@ class TestConstructors(base.BaseConstructorsTests): class TestReshaping(base.BaseReshapingTests): - pass + def test_transpose(self, data, dtype, request): + if isinstance(dtype, ArrowStringDtype): + mark = pytest.mark.xfail(reason="not implemented") + request.node.add_marker(mark) + super().test_transpose(data) class TestGetitem(base.BaseGetitemTests): @@ -69,7 +89,11 @@ class TestGetitem(base.BaseGetitemTests): class TestSetitem(base.BaseSetitemTests): - pass + def test_setitem_preserves_views(self, data, dtype, request): + if isinstance(dtype, ArrowStringDtype): + mark = pytest.mark.xfail(reason="not implemented") + request.node.add_marker(mark) + super().test_setitem_preserves_views(data) class TestMissing(base.BaseMissingTests):
- [x] xref #35169 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35259
2020-07-13T10:20:01Z
2020-11-20T14:20:24Z
2020-11-20T14:20:24Z
2020-11-20T20:50:34Z
[BUG] fixed DateOffset pickle bug when months >= 12
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 33e70daa55e66..8b2b9c14046af 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -80,6 +80,7 @@ Datetimelike ^^^^^^^^^^^^ - Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`) - Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`) +- Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`) - Timedelta diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index ac2725fc58aee..7f0314d737619 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -989,13 +989,6 @@ cdef class RelativeDeltaOffset(BaseOffset): state["_offset"] = state.pop("offset") state["kwds"]["offset"] = state["_offset"] - if "_offset" in state and not isinstance(state["_offset"], timedelta): - # relativedelta, we need to populate using its kwds - offset = state["_offset"] - odict = offset.__dict__ - kwds = {key: odict[key] for key in odict if odict[key]} - state.update(kwds) - self.n = state.pop("n") self.normalize = state.pop("normalize") self._cache = state.pop("_cache", {}) diff --git a/pandas/tests/io/data/legacy_pickle/1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle b/pandas/tests/io/data/legacy_pickle/1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle new file mode 100644 index 0000000000000..f8df9afff6565 Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle differ diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index e64103bd2cde8..61e1fc019faac 100644 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -6,10 +6,10 @@ in ~/pandas . 
activate pandas_0.20.3 -cd ~/ +cd ~/pandas/pandas -$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \ - pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ pickle +$ python -m tests.io.generate_legacy_storage_files \ + tests/io/data/legacy_pickle/0.20.3/ pickle This script generates a storage file for the current arch, system, and python version @@ -328,7 +328,7 @@ def write_legacy_pickles(output_dir): pth = f"{platform_name()}.pickle" fh = open(os.path.join(output_dir, pth), "wb") - pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL) + pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL) fh.close() print(f"created pickle file: {pth}") diff --git a/pandas/tests/tseries/offsets/data/dateoffset_0_15_2.pickle b/pandas/tests/tseries/offsets/data/dateoffset_0_15_2.pickle deleted file mode 100644 index ce561526a5e12..0000000000000 --- a/pandas/tests/tseries/offsets/data/dateoffset_0_15_2.pickle +++ /dev/null @@ -1,183 +0,0 @@ -(dp0 -S'YearBegin' -p1 -ccopy_reg -_reconstructor -p2 -(cpandas.tseries.offsets -YearBegin -p3 -c__builtin__ -object -p4 -Ntp5 -Rp6 -(dp7 -S'normalize' -p8 -I00 -sS'kwds' -p9 -(dp10 -sS'n' -p11 -I1 -sS'_offset' -p12 -cdatetime -timedelta -p13 -(I1 -I0 -I0 -tp14 -Rp15 -sS'month' -p16 -I1 -sS'_use_relativedelta' -p17 -I00 -sbsS'Week' -p18 -g2 -(cpandas.tseries.offsets -Week -p19 -g4 -Ntp20 -Rp21 -(dp22 -g8 -I00 -sS'_inc' -p23 -g13 -(I7 -I0 -I0 -tp24 -Rp25 -sg9 -(dp26 -sS'weekday' -p27 -Nsg11 -I1 -sbsS'MonthBegin' -p28 -g2 -(cpandas.tseries.offsets -MonthBegin -p29 -g4 -Ntp30 -Rp31 -(dp32 -g8 -I00 -sg12 -g13 -(I1 -I0 -I0 -tp33 -Rp34 -sg17 -I00 -sg9 -(dp35 -sg11 -I1 -sbsS'Day' -p36 -g2 -(cpandas.tseries.offsets -Day -p37 -g4 -Ntp38 -Rp39 -(dp40 -g8 -I00 -sg12 -g13 -(I1 -I0 -I0 -tp41 -Rp42 -sg17 -I00 -sg9 -(dp43 -sg11 -I1 -sbsS'DateOffset' -p44 -g2 -(cpandas.tseries.offsets -DateOffset -p45 -g4 -Ntp46 -Rp47 -(dp48 -g8 -I00 -sg12 -(idateutil.relativedelta -relativedelta -p49 -(dp50 -S'_has_time' -p51 -I0 -sS'hour' -p52 
-NsS'seconds' -p53 -I0 -sS'months' -p54 -I0 -sS'year' -p55 -NsS'days' -p56 -I0 -sS'years' -p57 -I1 -sS'hours' -p58 -I0 -sS'second' -p59 -NsS'microsecond' -p60 -Nsg16 -NsS'microseconds' -p61 -I0 -sS'leapdays' -p62 -I0 -sS'minutes' -p63 -I0 -sS'day' -p64 -NsS'minute' -p65 -Nsg27 -Nsbsg17 -I01 -sg9 -(dp66 -g57 -I1 -ssg11 -I1 -sbs. \ No newline at end of file diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 8c51908c547f4..d994ce35d682b 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -635,22 +635,6 @@ def test_add_empty_datetimeindex(self, offset_types, tz_naive_fixture): result = offset_s + dta tm.assert_equal(result, dta) - def test_pickle_v0_15_2(self, datapath): - offsets = { - "DateOffset": DateOffset(years=1), - "MonthBegin": MonthBegin(1), - "Day": Day(1), - "YearBegin": YearBegin(1), - "Week": Week(1), - } - - pickle_path = datapath("tseries", "offsets", "data", "dateoffset_0_15_2.pickle") - # This code was executed once on v0.15.2 to generate the pickle: - # with open(pickle_path, 'wb') as f: pickle.dump(offsets, f) - # - result = read_pickle(pickle_path) - tm.assert_dict_equal(offsets, result) - def test_pickle_roundtrip(self, offset_types): off = self._get_offset(offset_types) res = tm.round_trip_pickle(off) @@ -664,6 +648,15 @@ def test_pickle_roundtrip(self, offset_types): # Make sure nothings got lost from _params (which __eq__) is based on assert getattr(off, attr) == getattr(res, attr) + def test_pickle_dateoffset_odd_inputs(self): + # GH#34511 + off = DateOffset(months=12) + res = tm.round_trip_pickle(off) + assert off == res + + base_dt = datetime(2020, 1, 1) + assert base_dt + off == base_dt + res + def test_onOffset_deprecated(self, offset_types): # GH#30340 use idiomatic naming off = self._get_offset(offset_types)
- [x] closes #34511 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35258
2020-07-13T08:13:54Z
2020-08-14T16:17:38Z
2020-08-14T16:17:38Z
2021-03-20T02:35:50Z
REF: make tz_convert match pattern elsewhere
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index a6afd47d93479..606639af16a18 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -382,7 +382,10 @@ cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz): converted: int64 """ cdef: - int64_t arr[1] + int64_t delta + int64_t[:] deltas + ndarray[int64_t, ndim=1] trans + intp_t pos if val == NPY_NAT: return val @@ -391,9 +394,14 @@ cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz): return val elif is_tzlocal(tz): return _tz_convert_tzlocal_utc(val, tz, to_utc=False) + elif is_fixed_offset(tz): + _, deltas, _ = get_dst_info(tz) + delta = deltas[0] + return val + delta else: - arr[0] = val - return _tz_convert_dst(arr, tz)[0] + trans, deltas, _ = get_dst_info(tz) + pos = trans.searchsorted(val, side="right") - 1 + return val + deltas[pos] def tz_convert_from_utc(int64_t[:] vals, tzinfo tz): @@ -435,9 +443,12 @@ cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): converted : ndarray[int64_t] """ cdef: - int64_t[:] converted + int64_t[:] converted, deltas Py_ssize_t i, n = len(vals) - int64_t val + int64_t val, delta + intp_t[:] pos + ndarray[int64_t] trans + str typ if is_utc(tz): converted = vals @@ -450,7 +461,35 @@ cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): else: converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc=False) else: - converted = _tz_convert_dst(vals, tz) + converted = np.empty(n, dtype=np.int64) + + trans, deltas, typ = get_dst_info(tz) + + if typ not in ["pytz", "dateutil"]: + # FixedOffset, we know len(deltas) == 1 + delta = deltas[0] + + for i in range(n): + val = vals[i] + if val == NPY_NAT: + converted[i] = val + else: + converted[i] = val + delta + + else: + pos = trans.searchsorted(vals, side="right") - 1 + + for i in range(n): + val = vals[i] + if val == NPY_NAT: + converted[i] = val + else: + if pos[i] < 0: + # TODO: How is this reached? 
Should we be checking for + # it elsewhere? + raise ValueError("First time before start of DST info") + + converted[i] = val + deltas[pos[i]] return converted @@ -537,67 +576,3 @@ cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True, return val - delta else: return val + delta - - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef int64_t[:] _tz_convert_dst(const int64_t[:] values, tzinfo tz): - """ - tz_convert for non-UTC non-tzlocal cases where we have to check - DST transitions pointwise. - - Parameters - ---------- - values : ndarray[int64_t] - tz : tzinfo - - Returns - ------- - result : ndarray[int64_t] - """ - cdef: - Py_ssize_t n = len(values) - Py_ssize_t i - intp_t[:] pos - int64_t[:] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans - int64_t[:] deltas - int64_t v, delta - str typ - - # tz is assumed _not_ to be tzlocal; that should go - # through _tz_convert_tzlocal_utc - - trans, deltas, typ = get_dst_info(tz) - - if typ not in ["pytz", "dateutil"]: - # FixedOffset, we know len(deltas) == 1 - delta = deltas[0] - - for i in range(n): - v = values[i] - if v == NPY_NAT: - result[i] = v - else: - result[i] = v + delta - - else: - # Previously, this search was done pointwise to try and benefit - # from getting to skip searches for iNaTs. However, it seems call - # overhead dominates the search time so doing it once in bulk - # is substantially faster (GH#24603) - pos = trans.searchsorted(values, side="right") - 1 - - for i in range(n): - v = values[i] - if v == NPY_NAT: - result[i] = v - else: - if pos[i] < 0: - # TODO: How is this reached? Should we be checking for - # it elsewhere? - raise ValueError("First time before start of DST info") - - result[i] = v + deltas[pos[i]] - - return result
asvs for tslibs.tz_convert show this as perf-neutral
https://api.github.com/repos/pandas-dev/pandas/pulls/35255
2020-07-12T23:00:59Z
2020-07-13T16:42:47Z
2020-07-13T16:42:47Z
2020-07-13T20:13:24Z
BUG: Use correct ExtensionArray reductions in DataFrame reductions
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cfac916157649..9bc1499d5511e 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -913,6 +913,7 @@ Numeric - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`) - Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`) - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) +- Bug in :class:`DataFrame` reductions (e.g. ``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cfe5621fec14e..3b6eb9e3a27c0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -118,6 +118,7 @@ from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray from pandas.core.arrays.sparse import SparseFrameAccessor +from pandas.core.construction import extract_array from pandas.core.generic import NDFrame, _shared_docs from pandas.core.indexes import base as ibase from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences @@ -8499,7 +8500,14 @@ def _count_level(self, level, axis=0, numeric_only=False): return result def _reduce( - self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds + self, + op, + name: str, + axis=0, + skipna=True, + numeric_only=None, + filter_type=None, + **kwds, ): assert filter_type is None or filter_type == "bool", filter_type @@ -8531,8 +8539,11 @@ def _reduce( labels = self._get_agg_axis(axis) constructor = self._constructor - def f(x): - return op(x, axis=axis, skipna=skipna, **kwds) + def func(values): + if is_extension_array_dtype(values.dtype): + return 
extract_array(values)._reduce(name, skipna=skipna, **kwds) + else: + return op(values, axis=axis, skipna=skipna, **kwds) def _get_data(axis_matters): if filter_type is None: @@ -8579,7 +8590,7 @@ def blk_func(values): out[:] = coerce_to_dtypes(out.values, df.dtypes) return out - if not self._is_homogeneous_type: + if not self._is_homogeneous_type or self._mgr.any_extension_types: # try to avoid self.values call if filter_type is None and axis == 0 and len(self) > 0: @@ -8599,7 +8610,7 @@ def blk_func(values): from pandas.core.apply import frame_apply opa = frame_apply( - self, func=f, result_type="expand", ignore_failures=True + self, func=func, result_type="expand", ignore_failures=True ) result = opa.get_result() if result.ndim == self.ndim: @@ -8611,7 +8622,7 @@ def blk_func(values): values = data.values try: - result = f(values) + result = func(values) except TypeError: # e.g. in nanops trying to convert strs to float @@ -8622,7 +8633,7 @@ def blk_func(values): values = data.values with np.errstate(all="ignore"): - result = f(values) + result = func(values) else: if numeric_only: @@ -8633,7 +8644,7 @@ def blk_func(values): else: data = self values = data.values - result = f(values) + result = func(values) if filter_type == "bool" and is_object_dtype(values) and axis is None: # work around https://github.com/numpy/numpy/issues/10489 diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py index 44c3077228e80..a81434339fdae 100644 --- a/pandas/tests/arrays/integer/test_function.py +++ b/pandas/tests/arrays/integer/test_function.py @@ -133,6 +133,15 @@ def test_integer_array_numpy_sum(values, expected): assert result == expected +@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"]) +def test_dataframe_reductions(op): + # https://github.com/pandas-dev/pandas/pull/32867 + # ensure the integers are not cast to float during reductions + df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")}) + result = 
df.max() + assert isinstance(result["a"], np.int64) + + # TODO(jreback) - these need testing / are broken # shift diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index db8bb5ca3c437..9d6b9f39a0578 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1303,3 +1303,26 @@ def test_preserve_timezone(self, initial: str, method): df = DataFrame([expected]) result = getattr(df, method)(axis=1) tm.assert_series_equal(result, expected) + + +def test_mixed_frame_with_integer_sum(): + # https://github.com/pandas-dev/pandas/issues/34520 + df = pd.DataFrame([["a", 1]], columns=list("ab")) + df = df.astype({"b": "Int64"}) + result = df.sum() + expected = pd.Series(["a", 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("numeric_only", [True, False, None]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_minmax_extensionarray(method, numeric_only): + # https://github.com/pandas-dev/pandas/issues/32651 + int64_info = np.iinfo("int64") + ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype()) + df = DataFrame({"Int64": ser}) + result = getattr(df, method)(numeric_only=numeric_only) + expected = Series( + [getattr(int64_info, method)], index=pd.Index(["Int64"], dtype="object") + ) + tm.assert_series_equal(result, expected)
- [x] closes #34520 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35254
2020-07-12T22:48:34Z
2020-07-15T22:17:58Z
2020-07-15T22:17:58Z
2020-07-15T22:44:19Z
BUG: ValueError on groupby with categoricals
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5f93e08d51baa..85b29a58a1f15 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1091,6 +1091,7 @@ Groupby/resample/rolling - Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`) - Bug in :meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`) - Bug in :meth:`core.groupby.DataFrameGroupBy.transform` when ``func='nunique'`` and columns are of type ``datetime64``, the result would also be of type ``datetime64`` instead of ``int64`` (:issue:`35109`) +- Bug in :meth:'DataFrameGroupBy.first' and :meth:'DataFrameGroupBy.last' that would raise an unnecessary ``ValueError`` when grouping on multiple ``Categoricals`` (:issue:`34951`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1f49ee2b0b665..093e1d4ab3942 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1058,7 +1058,11 @@ def _cython_agg_blocks( # reductions; see GH#28949 obj = obj.iloc[:, 0] - s = get_groupby(obj, self.grouper) + # Create SeriesGroupBy with observed=True so that it does + # not try to add missing categories if grouping over multiple + # Categoricals. This will done by later self._reindex_output() + # Doing it here creates an error. 
See GH#34951 + s = get_groupby(obj, self.grouper, observed=True) try: result = s.aggregate(lambda x: alt(x, axis=self.axis)) except TypeError: diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 118d928ac02f4..7e4513da37dc9 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1669,3 +1669,53 @@ def test_categorical_transform(): expected["status"] = expected["status"].astype(delivery_status_type) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["first", "last"]) +def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals( + func: str, observed: bool +): + # GH 34951 + cat = pd.Categorical([0, 0, 1, 1]) + val = [0, 1, 1, 0] + df = pd.DataFrame({"a": cat, "b": cat, "c": val}) + + idx = pd.Categorical([0, 1]) + idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"]) + expected_dict = { + "first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"), + "last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"), + } + + expected = expected_dict[func] + if observed: + expected = expected.dropna().astype(np.int64) + + srs_grp = df.groupby(["a", "b"], observed=observed)["c"] + result = getattr(srs_grp, func)() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["first", "last"]) +def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals( + func: str, observed: bool +): + # GH 34951 + cat = pd.Categorical([0, 0, 1, 1]) + val = [0, 1, 1, 0] + df = pd.DataFrame({"a": cat, "b": cat, "c": val}) + + idx = pd.Categorical([0, 1]) + idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"]) + expected_dict = { + "first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"), + "last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"), + } + + expected = expected_dict[func].to_frame() + if observed: + expected = expected.dropna().astype(np.int64) + + df_grp = df.groupby(["a", "b"], 
observed=observed) + result = getattr(df_grp, func)() + tm.assert_frame_equal(result, expected)
- [x] closes #34951 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Within `DataFrameGroupBy._cython_agg_blocks`, if it is aggregating a one-column DataFrame, it creates a `SeriresGroupBy`, calls the function on that and takes the returned values. But the `SeriesGroupBy` also does the missing-categories reindexing. The `DataFrameGroupBy` ends up with values that contain the missing categories, and an index that does not. When they are passed into a BlockManager it raises a `ValueError` stating that their lengths don't match. Solutions is to have `_cython_agg_blocks` create a `SeriesGroupBy` with `observed=True` so it doesn't do any reindexing. The reindexing is left to the calling `DataFrameGroupBy` This also explains why error only occurred in `DataFrameGroupBy` but not `SeriesGroupBy`.
https://api.github.com/repos/pandas-dev/pandas/pulls/35253
2020-07-12T22:31:33Z
2020-07-13T22:22:46Z
2020-07-13T22:22:45Z
2020-07-13T22:22:54Z
REF: de-duplicate get_resolution
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index bdc00f6c6e21a..b23f8255a76ac 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -211,49 +211,40 @@ def get_resolution(const int64_t[:] stamps, tzinfo tz=None): int reso = RESO_DAY, curr_reso ndarray[int64_t] trans int64_t[:] deltas - Py_ssize_t[:] pos - int64_t local_val, delta + intp_t[:] pos + int64_t local_val, delta = NPY_NAT + bint use_utc = False, use_tzlocal = False, use_fixed = False if is_utc(tz) or tz is None: - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso + use_utc = True elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso + use_tzlocal = True else: - # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - if typ not in ["pytz", "dateutil"]: # static/fixed; in this case we know that len(delta) == 1 + use_fixed = True delta = deltas[0] - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i] + delta, &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso else: pos = trans.searchsorted(stamps, side="right") - 1 - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso + + for i in range(n): + if stamps[i] == NPY_NAT: + continue + + if use_utc: + local_val = stamps[i] + elif use_tzlocal: + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + elif use_fixed: + local_val = stamps[i] + delta + else: + local_val = stamps[i] + deltas[pos[i]] + + dt64_to_dtstruct(local_val, &dts) + curr_reso = 
_reso_stamp(&dts) + if curr_reso < reso: + reso = curr_reso return Resolution(reso)
This changes get_resolution to use the same less-verbose pattern as ints_to_pydatetime; hopefully we'll be able to refactor out a couple of helper functions. asv run says this is perf-neutral
https://api.github.com/repos/pandas-dev/pandas/pulls/35245
2020-07-12T04:33:31Z
2020-08-03T23:22:04Z
2020-08-03T23:22:04Z
2020-08-04T02:01:27Z
ASV: dt64arr_to_periodarr
diff --git a/asv_bench/benchmarks/tslibs/period.py b/asv_bench/benchmarks/tslibs/period.py index 1a2c89b48c665..849e8ec864ac2 100644 --- a/asv_bench/benchmarks/tslibs/period.py +++ b/asv_bench/benchmarks/tslibs/period.py @@ -9,7 +9,12 @@ from pandas.tseries.frequencies import to_offset -from .tslib import _sizes +from .tslib import _sizes, _tzs + +try: + from pandas._libs.tslibs.vectorized import dt64arr_to_periodarr +except ImportError: + from pandas._libs.tslibs.period import dt64arr_to_periodarr class PeriodProperties: @@ -75,26 +80,29 @@ def time_period_constructor(self, freq, is_offset): Period("2012-06-01", freq=freq) +_freq_ints = [ + 1000, + 1011, # Annual - November End + 2000, + 2011, # Quarterly - November End + 3000, + 4000, + 4006, # Weekly - Saturday End + 5000, + 6000, + 7000, + 8000, + 9000, + 10000, + 11000, + 12000, +] + + class TimePeriodArrToDT64Arr: params = [ _sizes, - [ - 1000, - 1011, # Annual - November End - 2000, - 2011, # Quarterly - November End - 3000, - 4000, - 4006, # Weekly - Saturday End - 5000, - 6000, - 7000, - 8000, - 9000, - 10000, - 11000, - 12000, - ], + _freq_ints, ] param_names = ["size", "freq"] @@ -104,3 +112,19 @@ def setup(self, size, freq): def time_periodarray_to_dt64arr(self, size, freq): periodarr_to_dt64arr(self.i8values, freq) + + +class TimeDT64ArrToPeriodArr: + params = [ + _sizes, + _freq_ints, + _tzs, + ] + param_names = ["size", "freq", "tz"] + + def setup(self, size, freq, tz): + arr = np.arange(10, dtype="i8").repeat(size // 10) + self.i8values = arr + + def time_dt64arr_to_periodarr(self, size, freq, tz): + dt64arr_to_periodarr(self.i8values, freq, tz)
https://api.github.com/repos/pandas-dev/pandas/pulls/35244
2020-07-12T01:47:05Z
2020-07-14T17:16:30Z
2020-07-14T17:16:30Z
2020-07-14T18:43:27Z
CLN: annotate
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index 2d859db22ea23..a98820ca57895 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -15,7 +15,7 @@ DEF dROUNDS = 4 @cython.boundscheck(False) -def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'): +def hash_object_array(ndarray[object] arr, str key, str encoding="utf8"): """ Parameters ---------- diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index d70d0378a2621..35d5cd8f1e275 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -5,6 +5,7 @@ from cpython.datetime cimport ( PyDateTime_Check, PyDateTime_IMPORT, datetime, + tzinfo, ) # import datetime C API PyDateTime_IMPORT @@ -93,8 +94,8 @@ def _test_parse_iso8601(ts: str): @cython.boundscheck(False) def format_array_from_datetime( ndarray[int64_t] values, - object tz=None, - object format=None, + tzinfo tz=None, + str format=None, object na_rep=None ): """ @@ -103,8 +104,8 @@ def format_array_from_datetime( Parameters ---------- values : a 1-d i8 array - tz : the timezone (or None) - format : optional, default is None + tz : tzinfo or None, default None + format : str or None, default None a strftime capable string na_rep : optional, default is None a nat format @@ -360,7 +361,7 @@ cpdef array_to_datetime( str errors='raise', bint dayfirst=False, bint yearfirst=False, - object utc=None, + bint utc=False, bint require_iso8601=False ): """ @@ -386,7 +387,7 @@ cpdef array_to_datetime( dayfirst parsing behavior when encountering datetime strings yearfirst : bool, default False yearfirst parsing behavior when encountering datetime strings - utc : bool, default None + utc : bool, default False indicator whether the dates should be UTC require_iso8601 : bool, default False indicator whether the datetime string should be iso8601 @@ -412,7 +413,7 @@ cpdef array_to_datetime( bint is_same_offsets _TSObject _ts int64_t value - int out_local=0, out_tzoffset=0 + int out_local = 0, 
out_tzoffset = 0 float offset_seconds, tz_offset set out_tzoffset_vals = set() bint string_to_dts_failed @@ -659,7 +660,7 @@ cdef array_to_datetime_object( ndarray[object] values, str errors, bint dayfirst=False, - bint yearfirst=False + bint yearfirst=False, ): """ Fall back function for array_to_datetime @@ -671,7 +672,7 @@ cdef array_to_datetime_object( ---------- values : ndarray of object date-like objects to convert - errors : str, default 'raise' + errors : str error behavior when parsing dayfirst : bool, default False dayfirst parsing behavior when encountering datetime strings @@ -684,7 +685,7 @@ cdef array_to_datetime_object( """ cdef: Py_ssize_t i, n = len(values) - object val, + object val bint is_ignore = errors == 'ignore' bint is_coerce = errors == 'coerce' bint is_raise = errors == 'raise' diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 884578df3e00b..660b582f73e6e 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -5,7 +5,7 @@ import locale import calendar import re -from cpython cimport datetime +from cpython.datetime cimport date, tzinfo from _thread import allocate_lock as _thread_allocate_lock @@ -291,20 +291,20 @@ def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise' elif iso_year != -1 and iso_week != -1: year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1) - # Cannot pre-calculate datetime.date() since can change in Julian + # Cannot pre-calculate date() since can change in Julian # calculation and thus could have different value for the day of the wk # calculation. try: if julian == -1: # Need to add 1 to result since first day of the year is 1, not # 0. 
- ordinal = datetime.date(year, month, day).toordinal() - julian = ordinal - datetime.date(year, 1, 1).toordinal() + 1 + ordinal = date(year, month, day).toordinal() + julian = ordinal - date(year, 1, 1).toordinal() + 1 else: # Assume that if they bothered to include Julian day it will # be accurate. - datetime_result = datetime.date.fromordinal( - (julian - 1) + datetime.date(year, 1, 1).toordinal()) + datetime_result = date.fromordinal( + (julian - 1) + date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day @@ -314,7 +314,7 @@ def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise' continue raise if weekday == -1: - weekday = datetime.date(year, month, day).weekday() + weekday = date(year, month, day).weekday() dts.year = year dts.month = month @@ -652,7 +652,7 @@ cdef int _calc_julian_from_U_or_W(int year, int week_of_year, cdef: int first_weekday, week_0_length, days_to_week - first_weekday = datetime.date(year, 1, 1).weekday() + first_weekday = date(year, 1, 1).weekday() # If we are dealing with the %U directive (week starts on Sunday), it's # easier to just shift the view to Sunday being the first day of the # week. 
@@ -695,18 +695,18 @@ cdef (int, int) _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday) cdef: int correction, ordinal - correction = datetime.date(iso_year, 1, 4).isoweekday() + 3 + correction = date(iso_year, 1, 4).isoweekday() + 3 ordinal = (iso_week * 7) + iso_weekday - correction # ordinal may be negative or 0 now, which means the date is in the previous # calendar year if ordinal < 1: - ordinal += datetime.date(iso_year, 1, 1).toordinal() + ordinal += date(iso_year, 1, 1).toordinal() iso_year -= 1 - ordinal -= datetime.date(iso_year, 1, 1).toordinal() + ordinal -= date(iso_year, 1, 1).toordinal() return iso_year, ordinal -cdef parse_timezone_directive(str z): +cdef tzinfo parse_timezone_directive(str z): """ Parse the '%z' directive and return a pytz.FixedOffset
https://api.github.com/repos/pandas-dev/pandas/pulls/35242
2020-07-11T23:51:52Z
2020-07-13T12:35:01Z
2020-07-13T12:35:01Z
2020-07-13T14:43:33Z
TST: GH20676 Verify equals operator for list of Numpy arrays
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 94747a52136c4..5e66925a38ec6 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -8,7 +8,7 @@ from pandas.core.dtypes.common import is_scalar import pandas as pd -from pandas import DataFrame, MultiIndex, Series, date_range +from pandas import DataFrame, Series, date_range import pandas._testing as tm import pandas.core.common as com @@ -785,26 +785,6 @@ def test_depr_take_kwarg_is_copy(self, is_copy): s.take([0, 1], is_copy=is_copy) def test_equals(self): - s1 = pd.Series([1, 2, 3], index=[0, 2, 1]) - s2 = s1.copy() - assert s1.equals(s2) - - s1[1] = 99 - assert not s1.equals(s2) - - # NaNs compare as equal - s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3]) - s2 = s1.copy() - assert s1.equals(s2) - - s2[0] = 9.9 - assert not s1.equals(s2) - - idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")]) - s1 = Series([1, 2, np.nan], index=idx) - s2 = s1.copy() - assert s1.equals(s2) - # Add object dtype column with nans index = np.random.random(10) df1 = DataFrame(np.random.random(10), index=index, columns=["floats"]) @@ -857,21 +837,6 @@ def test_equals(self): df2 = df1.set_index(["floats"], append=True) assert df3.equals(df2) - # GH 8437 - a = pd.Series([False, np.nan]) - b = pd.Series([False, np.nan]) - c = pd.Series(index=range(2), dtype=object) - d = c.copy() - e = c.copy() - f = c.copy() - c[:-1] = d[:-1] = e[0] = f[0] = False - assert a.equals(a) - assert a.equals(b) - assert a.equals(c) - assert a.equals(d) - assert a.equals(e) - assert e.equals(f) - def test_pipe(self): df = DataFrame({"A": [1, 2, 3]}) f = lambda x, y: x ** y diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py new file mode 100644 index 0000000000000..600154adfcda3 --- /dev/null +++ b/pandas/tests/series/methods/test_equals.py @@ -0,0 +1,55 @@ +import numpy as np +import pytest + +from pandas 
import MultiIndex, Series + + +@pytest.mark.parametrize( + "arr, idx", + [ + ([1, 2, 3, 4], [0, 2, 1, 3]), + ([1, np.nan, 3, np.nan], [0, 2, 1, 3]), + ( + [1, np.nan, 3, np.nan], + MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]), + ), + ], +) +def test_equals(arr, idx): + s1 = Series(arr, index=idx) + s2 = s1.copy() + assert s1.equals(s2) + + s1[1] = 9 + assert not s1.equals(s2) + + +def test_equals_list_array(): + # GH20676 Verify equals operator for list of Numpy arrays + arr = np.array([1, 2]) + s1 = Series([arr, arr]) + s2 = s1.copy() + assert s1.equals(s2) + + # TODO: Series equals should also work between single value and list + # s1[1] = 9 + # assert not s1.equals(s2) + + +def test_equals_false_negative(): + # GH8437 Verify false negative behavior of equals function for dtype object + arr = [False, np.nan] + s1 = Series(arr) + s2 = s1.copy() + s3 = Series(index=range(2), dtype=object) + s4 = s3.copy() + s5 = s3.copy() + s6 = s3.copy() + + s3[:-1] = s4[:-1] = s5[0] = s6[0] = False + assert s1.equals(s1) + assert s1.equals(s2) + assert s1.equals(s3) + assert s1.equals(s4) + assert s1.equals(s5) + assert s5.equals(s6)
- [x] closes #20676 - [ x ] tests added / passed - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35237
2020-07-11T17:38:42Z
2020-07-13T16:36:12Z
2020-07-13T16:36:11Z
2020-07-13T16:36:15Z
TST: added tests for sparse and date range quantiles
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 0eec30cbc5c67..0b8f1e0495155 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -7,14 +7,29 @@ class TestDataFrameQuantile: - def test_quantile_sparse(self): + @pytest.mark.parametrize( + "df,expected", + [ + [ + pd.DataFrame( + { + 0: pd.Series(pd.arrays.SparseArray([1, 2])), + 1: pd.Series(pd.arrays.SparseArray([3, 4])), + } + ), + pd.Series([1.5, 3.5], name=0.5), + ], + [ + pd.DataFrame(pd.Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), + pd.Series([1.0], name=0.5), + ], + ], + ) + def test_quantile_sparse(self, df, expected): # GH#17198 - s = pd.Series(pd.arrays.SparseArray([1, 2])) - s1 = pd.Series(pd.arrays.SparseArray([3, 4])) - df = pd.DataFrame({0: s, 1: s1}) + # GH#24600 result = df.quantile() - expected = pd.Series([1.5, 3.5], name=0.5) tm.assert_series_equal(result, expected) def test_quantile(self, datetime_frame): @@ -59,6 +74,20 @@ def test_quantile(self, datetime_frame): expected = Series([3.0, 4.0], index=[0, 1], name=0.5) tm.assert_series_equal(result, expected) + def test_quantile_date_range(self): + # GH 2460 + + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + ser = pd.Series(dti) + df = pd.DataFrame(ser) + + result = df.quantile(numeric_only=False) + expected = pd.Series( + ["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]" + ) + + tm.assert_series_equal(result, expected) + def test_quantile_axis_mixed(self): # mixed on axis=1
- [x] closes #24600 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Added tests for sparse and date range quantiles
https://api.github.com/repos/pandas-dev/pandas/pulls/35236
2020-07-11T17:00:02Z
2020-07-16T01:31:46Z
2020-07-16T01:31:45Z
2020-07-16T15:29:24Z
CLN/DOC: DataFrame.to_parquet supports file-like objects
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cfe5621fec14e..df017bb0c1f0f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -19,6 +19,7 @@ IO, TYPE_CHECKING, Any, + AnyStr, Dict, FrozenSet, Hashable, @@ -2252,11 +2253,11 @@ def to_markdown( @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_parquet( self, - path, - engine="auto", - compression="snappy", - index=None, - partition_cols=None, + path: FilePathOrBuffer[AnyStr], + engine: str = "auto", + compression: Optional[str] = "snappy", + index: Optional[bool] = None, + partition_cols: Optional[List[str]] = None, **kwargs, ) -> None: """ @@ -2269,9 +2270,12 @@ def to_parquet( Parameters ---------- - path : str - File path or Root Directory path. Will be used as Root Directory - path while writing a partitioned dataset. + path : str or file-like object + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handler + (e.g. via builtin open function) or io.BytesIO. The engine + fastparquet does not accept file-like objects. .. versionchanged:: 1.0.0 @@ -2298,6 +2302,7 @@ def to_parquet( partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. + Must be None if path is not a string. .. 
versionadded:: 0.24.0 diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index a0c9242684f0f..8c4b63767ac06 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -1,8 +1,9 @@ """ parquet compat """ -from typing import Any, Dict, Optional +from typing import Any, AnyStr, Dict, List, Optional from warnings import catch_warnings +from pandas._typing import FilePathOrBuffer from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError @@ -85,10 +86,10 @@ def __init__(self): def write( self, df: DataFrame, - path, - compression="snappy", + path: FilePathOrBuffer[AnyStr], + compression: Optional[str] = "snappy", index: Optional[bool] = None, - partition_cols=None, + partition_cols: Optional[List[str]] = None, **kwargs, ): self.validate_dataframe(df) @@ -213,11 +214,11 @@ def read(self, path, columns=None, **kwargs): def to_parquet( df: DataFrame, - path, + path: FilePathOrBuffer[AnyStr], engine: str = "auto", - compression="snappy", + compression: Optional[str] = "snappy", index: Optional[bool] = None, - partition_cols=None, + partition_cols: Optional[List[str]] = None, **kwargs, ): """ @@ -226,9 +227,12 @@ def to_parquet( Parameters ---------- df : DataFrame - path : str - File path or Root Directory path. Will be used as Root Directory path - while writing a partitioned dataset. + path : str or file-like object + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handler + (e.g. via builtin open function) or io.BytesIO. The engine + fastparquet does not accept file-like objects. .. versionchanged:: 0.24.0 @@ -251,8 +255,9 @@ def to_parquet( .. versionadded:: 0.24.0 partition_cols : str or list, optional, default None - Column names by which to partition the dataset - Columns are partitioned in the order they are given + Column names by which to partition the dataset. 
+ Columns are partitioned in the order they are given. + Must be None if path is not a string. .. versionadded:: 0.24.0
Adds documentation and type-hints for supporting file-like objects when `engine == 'pyarrow'`; relevant to #30081. Tests for this behavior currently exist in `io.test_parquet.py`: ```` @td.skip_if_no("pyarrow") def test_read_file_like_obj_support(self, df_compat): buffer = BytesIO() df_compat.to_parquet(buffer) df_from_buf = pd.read_parquet(buffer) tm.assert_frame_equal(df_compat, df_from_buf) ```` Perhaps the restrictions on the arguments when path is not a string: * partition_cols must be None; and * engine must end up being pyarrow should be checked directly in `DataFrame.to_parquet`, but I'm leaving this out as that is an API change that could be made in a subsequent PR. The latter gives the clear error message `TypeError: expected str, bytes or os.PathLike object, not _io.BytesIO` but the former raises `AttributeError: 'NoneType' object has no attribute '_isfilestore'` which is slightly confusing. Another API change that could be made subsequently is changing the `path` argument to `path_or_buf`, consistent with `DataFrame.to_csv`.
https://api.github.com/repos/pandas-dev/pandas/pulls/35235
2020-07-11T16:50:07Z
2020-07-17T12:33:05Z
2020-07-17T12:33:05Z
2020-07-17T13:08:25Z
TST add test for dtype consistency with pd replace #23305
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 498f7f7790514..a295b1f8baf63 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1420,3 +1420,83 @@ def test_replace_period_ignore_float(self): result = df.replace(1.0, 0.0) expected = pd.DataFrame({"Per": [pd.Period("2020-01")] * 3}) tm.assert_frame_equal(expected, result) + + def test_replace_value_category_type(self): + """ + Test for #23305: to ensure category dtypes are maintained + after replace with direct values + """ + + # create input data + input_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "d"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "cat2", "cat3", "cat4"], + "col5": ["obj1", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + input_df = pd.DataFrame(data=input_dict).astype( + {"col2": "category", "col4": "category"} + ) + input_df["col2"] = input_df["col2"].cat.reorder_categories( + ["a", "b", "c", "d"], ordered=True + ) + input_df["col4"] = input_df["col4"].cat.reorder_categories( + ["cat1", "cat2", "cat3", "cat4"], ordered=True + ) + + # create expected dataframe + expected_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "z"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "catX", "cat3", "cat4"], + "col5": ["obj9", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + expected = pd.DataFrame(data=expected_dict).astype( + {"col2": "category", "col4": "category"} + ) + expected["col2"] = expected["col2"].cat.reorder_categories( + ["a", "b", "c", "z"], ordered=True + ) + expected["col4"] = expected["col4"].cat.reorder_categories( + ["cat1", "catX", "cat3", "cat4"], ordered=True + ) + + # replace values in input dataframe + input_df = input_df.replace("d", "z") + input_df = input_df.replace("obj1", "obj9") + result = input_df.replace("cat2", "catX") + + tm.assert_frame_equal(result, 
expected) + + @pytest.mark.xfail( + reason="category dtype gets changed to object type after replace, see #35268", + strict=True, + ) + def test_replace_dict_category_type(self, input_category_df, expected_category_df): + """ + Test to ensure category dtypes are maintained + after replace with dict values + """ + + # create input dataframe + input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]} + # explicitly cast columns as category + input_df = pd.DataFrame(data=input_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # create expected dataframe + expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]} + # explicitly cast columns as category + expected = pd.DataFrame(data=expected_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # replace values in input dataframe using a dict + result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) + + tm.assert_frame_equal(result, expected)
- [x] closes #23305 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35234
2020-07-11T16:32:19Z
2020-07-16T22:45:58Z
2020-07-16T22:45:57Z
2020-07-16T22:46:02Z
TST: Verify filtering operations on DataFrames with categorical Series
diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index d94dc8d2ffe00..25ad9063e7418 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -391,3 +391,14 @@ def test_loc_indexing_preserves_index_category_dtype(self): result = df.loc[["a"]].index.levels[0] tm.assert_index_equal(result, expected) + + def test_categorical_filtering(self): + # GH22609 Verify filtering operations on DataFrames with categorical Series + df = pd.DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) + df["b"] = df.b.astype("category") + + result = df.where(df.a > 0) + expected = df.copy() + expected.loc[0, :] = np.nan + + tm.assert_equal(result, expected)
- [ x ] closes #22609 - [ x ] tests added / passed - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35233
2020-07-11T16:20:29Z
2020-07-15T22:24:07Z
2020-07-15T22:24:06Z
2020-07-15T22:24:10Z
Tst verify return none in tests/frame
diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index d94dc8d2ffe00..cfc22b9b18729 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -326,7 +326,10 @@ def test_assigning_ops(self): df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf) exp_fancy = exp_multi_row.copy() - exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True) + return_value = exp_fancy["cats"].cat.set_categories( + ["a", "b", "c"], inplace=True + ) + assert return_value is None df[df["cats"] == "c"] = ["b", 2] # category c is kept in .categories diff --git a/pandas/tests/frame/indexing/test_mask.py b/pandas/tests/frame/indexing/test_mask.py index 30db6110efc80..23f3a18881782 100644 --- a/pandas/tests/frame/indexing/test_mask.py +++ b/pandas/tests/frame/indexing/test_mask.py @@ -36,12 +36,14 @@ def test_mask_inplace(self): rdf = df.copy() - rdf.where(cond, inplace=True) + return_value = rdf.where(cond, inplace=True) + assert return_value is None tm.assert_frame_equal(rdf, df.where(cond)) tm.assert_frame_equal(rdf, df.mask(~cond)) rdf = df.copy() - rdf.where(cond, -df, inplace=True) + return_value = rdf.where(cond, -df, inplace=True) + assert return_value is None tm.assert_frame_equal(rdf, df.where(cond, -df)) tm.assert_frame_equal(rdf, df.mask(~cond, -df)) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 24eb424bd5735..d114a3178b686 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -162,7 +162,8 @@ def _check_set(df, cond, check_dtypes=True): econd = cond.reindex_like(df).fillna(True) expected = dfi.mask(~econd) - dfi.where(cond, np.nan, inplace=True) + return_value = dfi.where(cond, np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(dfi, expected) # dtypes (and confirm upcasts)x @@ -303,7 +304,8 @@ def 
test_where_bug(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(result > 2, np.nan, inplace=True) + return_value = result.where(result > 2, np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_where_bug_mixed(self, sint_dtype): @@ -324,7 +326,8 @@ def test_where_bug_mixed(self, sint_dtype): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(result > 2, np.nan, inplace=True) + return_value = result.where(result > 2, np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_where_bug_transposition(self): @@ -417,7 +420,8 @@ def create(): result = df.where(pd.notna(df), df.mean(), axis="columns") tm.assert_frame_equal(result, expected) - df.where(pd.notna(df), df.mean(), inplace=True, axis="columns") + return_value = df.where(pd.notna(df), df.mean(), inplace=True, axis="columns") + assert return_value is None tm.assert_frame_equal(df, expected) df = create().fillna(0) @@ -453,7 +457,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s, axis="index", inplace=True) + return_value = result.where(mask, s, axis="index", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) expected = DataFrame([[0, 1], [0, 1]], dtype="float64") @@ -461,7 +466,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s, axis="columns", inplace=True) + return_value = result.where(mask, s, axis="columns", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) # Upcast needed @@ -474,7 +480,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s, axis="index", inplace=True) + return_value = result.where(mask, s, axis="index", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) expected = 
DataFrame([[0, np.nan], [0, np.nan]]) @@ -488,7 +495,8 @@ def test_where_axis(self): } ) result = df.copy() - result.where(mask, s, axis="columns", inplace=True) + return_value = result.where(mask, s, axis="columns", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) # Multiple dtypes (=> multiple Blocks) @@ -511,7 +519,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s1, axis="columns", inplace=True) + return_value = result.where(mask, s1, axis="columns", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.where(mask, s2, axis="index") @@ -521,7 +530,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s2, axis="index", inplace=True) + return_value = result.where(mask, s2, axis="index", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) # DataFrame vs DataFrame @@ -534,10 +544,12 @@ def test_where_axis(self): result = df.where(mask, d1, axis="index") tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d1, inplace=True) + return_value = result.where(mask, d1, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d1, inplace=True, axis="index") + return_value = result.where(mask, d1, inplace=True, axis="index") + assert return_value is None tm.assert_frame_equal(result, expected) d2 = df.copy().drop(1, axis=1) @@ -549,10 +561,12 @@ def test_where_axis(self): result = df.where(mask, d2, axis="columns") tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d2, inplace=True) + return_value = result.where(mask, d2, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d2, inplace=True, axis="columns") + return_value = result.where(mask, d2, 
inplace=True, axis="columns") + assert return_value is None tm.assert_frame_equal(result, expected) def test_where_callable(self): diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py index 34727da3b95ae..ca62b56664518 100644 --- a/pandas/tests/frame/methods/test_clip.py +++ b/pandas/tests/frame/methods/test_clip.py @@ -22,7 +22,8 @@ def test_inplace_clip(self, float_frame): median = float_frame.median().median() frame_copy = float_frame.copy() - frame_copy.clip(upper=median, lower=median, inplace=True) + return_value = frame_copy.clip(upper=median, lower=median, inplace=True) + assert return_value is None assert not (frame_copy.values != median).any() def test_dataframe_clip(self): diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 177d10cdbf615..aa44a2427dc8f 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -70,8 +70,10 @@ def test_drop_names(self): df_dropped_b = df.drop("b") df_dropped_e = df.drop("e", axis=1) df_inplace_b, df_inplace_e = df.copy(), df.copy() - df_inplace_b.drop("b", inplace=True) - df_inplace_e.drop("e", axis=1, inplace=True) + return_value = df_inplace_b.drop("b", inplace=True) + assert return_value is None + return_value = df_inplace_e.drop("e", axis=1, inplace=True) + assert return_value is None for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e): assert obj.index.name == "first" assert obj.columns.name == "second" @@ -148,7 +150,8 @@ def test_drop(self): # GH#5628 df = pd.DataFrame(np.random.randn(10, 3), columns=list("abc")) expected = df[~(df.b > 0)] - df.drop(labels=df[df.b > 0].index, inplace=True) + return_value = df.drop(labels=df[df.b > 0].index, inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) def test_drop_multiindex_not_lexsorted(self): diff --git a/pandas/tests/frame/methods/test_interpolate.py 
b/pandas/tests/frame/methods/test_interpolate.py index facb116646573..ddb5723e7bd3e 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -246,11 +246,13 @@ def test_interp_inplace(self): df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]}) expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]}) result = df.copy() - result["a"].interpolate(inplace=True) + return_value = result["a"].interpolate(inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.copy() - result["a"].interpolate(inplace=True, downcast="infer") + return_value = result["a"].interpolate(inplace=True, downcast="infer") + assert return_value is None tm.assert_frame_equal(result, expected.astype("int64")) def test_interp_inplace_row(self): @@ -259,7 +261,8 @@ def test_interp_inplace_row(self): {"a": [1.0, 2.0, 3.0, 4.0], "b": [np.nan, 2.0, 3.0, 4.0], "c": [3, 2, 2, 2]} ) expected = result.interpolate(method="linear", axis=1, inplace=False) - result.interpolate(method="linear", axis=1, inplace=True) + return_value = result.interpolate(method="linear", axis=1, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_interp_ignore_all_good(self): @@ -297,7 +300,8 @@ def test_interp_time_inplace_axis(self, axis): expected = DataFrame(index=idx, columns=idx, data=data) result = expected.interpolate(axis=0, method="time") - expected.interpolate(axis=0, method="time", inplace=True) + return_value = expected.interpolate(axis=0, method="time", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("axis_name, axis_number", [("index", 0), ("columns", 1)]) diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py index ffad526d3f4d1..eb908e9472fe2 100644 --- a/pandas/tests/frame/methods/test_rename.py +++ b/pandas/tests/frame/methods/test_rename.py @@ -150,7 +150,8 @@ def 
test_rename_inplace(self, float_frame): c_id = id(float_frame["C"]) float_frame = float_frame.copy() - float_frame.rename(columns={"C": "foo"}, inplace=True) + return_value = float_frame.rename(columns={"C": "foo"}, inplace=True) + assert return_value is None assert "C" not in float_frame assert "foo" in float_frame diff --git a/pandas/tests/frame/methods/test_rename_axis.py b/pandas/tests/frame/methods/test_rename_axis.py index 9b964d842526c..3339119841813 100644 --- a/pandas/tests/frame/methods/test_rename_axis.py +++ b/pandas/tests/frame/methods/test_rename_axis.py @@ -10,14 +10,16 @@ def test_rename_axis_inplace(self, float_frame): # GH#15704 expected = float_frame.rename_axis("foo") result = float_frame.copy() - no_return = result.rename_axis("foo", inplace=True) + return_value = no_return = result.rename_axis("foo", inplace=True) + assert return_value is None assert no_return is None tm.assert_frame_equal(result, expected) expected = float_frame.rename_axis("bar", axis=1) result = float_frame.copy() - no_return = result.rename_axis("bar", axis=1, inplace=True) + return_value = no_return = result.rename_axis("bar", axis=1, inplace=True) + assert return_value is None assert no_return is None tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 498f7f7790514..ea72a3d8fef4d 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -27,7 +27,8 @@ def test_replace_inplace(self, datetime_frame, float_string_frame): datetime_frame["A"][-5:] = np.nan tsframe = datetime_frame.copy() - tsframe.replace(np.nan, 0, inplace=True) + return_value = tsframe.replace(np.nan, 0, inplace=True) + assert return_value is None tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) # mixed type @@ -40,7 +41,8 @@ def test_replace_inplace(self, datetime_frame, float_string_frame): tm.assert_frame_equal(result, expected) tsframe = 
datetime_frame.copy() - tsframe.replace([np.nan], [0], inplace=True) + return_value = tsframe.replace([np.nan], [0], inplace=True) + assert return_value is None tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) def test_regex_replace_scalar(self, mix_ab): @@ -117,18 +119,21 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # regex -> value # obj frame res = dfobj.copy() - res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -136,7 +141,8 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) @@ -144,18 +150,27 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # everything with compiled regexs as well res = dfobj.copy() - res.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True) + 
return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -163,25 +178,31 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) tm.assert_frame_equal(res, expec) res = dfobj.copy() - res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -189,7 +210,8 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None mixc = 
mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) @@ -197,18 +219,27 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # everything with compiled regexs as well res = dfobj.copy() - res.replace(regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -216,7 +247,10 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) @@ -290,7 +324,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*\.\s*", r"e|f|g"] values = [np.nan, "crap"] res = dfobj.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", np.nan, np.nan], @@ -304,7 +339,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"] values = [r"\1\1", r"\1_crap"] res = dfobj.copy() - 
res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", "..", ".."], @@ -319,7 +355,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*(\.)\s*", r"e"] values = [r"\1\1", r"crap"] res = dfobj.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", "..", ".."], @@ -332,7 +369,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*(\.)\s*", r"e"] values = [r"\1\1", r"crap"] res = dfobj.copy() - res.replace(value=values, regex=to_replace_res, inplace=True) + return_value = res.replace(value=values, regex=to_replace_res, inplace=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", "..", ".."], @@ -391,7 +429,8 @@ def test_regex_replace_list_mixed_inplace(self, mix_ab): to_replace_res = [r"\s*\.\s*", r"a"] values = [np.nan, "crap"] res = dfmix.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]}) tm.assert_frame_equal(res, expec) @@ -399,7 +438,8 @@ def test_regex_replace_list_mixed_inplace(self, mix_ab): to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] values = [r"\1\1", r"\1_crap"] res = dfmix.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) tm.assert_frame_equal(res, expec) @@ -408,14 +448,16 @@ def test_regex_replace_list_mixed_inplace(self, mix_ab): to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] values 
= [r"\1\1", r"crap", r"\1_crap"] res = dfmix.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) tm.assert_frame_equal(res, expec) to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] values = [r"\1\1", r"crap", r"\1_crap"] res = dfmix.copy() - res.replace(regex=to_replace_res, value=values, inplace=True) + return_value = res.replace(regex=to_replace_res, value=values, inplace=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) tm.assert_frame_equal(res, expec) @@ -430,7 +472,10 @@ def test_regex_replace_dict_mixed(self, mix_abc): # frame res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True) res2 = dfmix.copy() - res2.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True) + return_value = res2.replace( + {"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True + ) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} ) @@ -441,7 +486,10 @@ def test_regex_replace_dict_mixed(self, mix_abc): # whole frame res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True) res2 = dfmix.copy() - res2.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True) + return_value = res2.replace( + {"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True + ) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} ) @@ -450,7 +498,10 @@ def test_regex_replace_dict_mixed(self, mix_abc): res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}) res2 = dfmix.copy() - res2.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True) + return_value = res2.replace( + regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True + ) 
+ assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} ) @@ -464,13 +515,15 @@ def test_regex_replace_dict_mixed(self, mix_abc): ) res = dfmix.replace("a", {"b": np.nan}, regex=True) res2 = dfmix.copy() - res2.replace("a", {"b": np.nan}, regex=True, inplace=True) + return_value = res2.replace("a", {"b": np.nan}, regex=True, inplace=True) + assert return_value is None tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) res = dfmix.replace("a", {"b": np.nan}, regex=True) res2 = dfmix.copy() - res2.replace(regex="a", value={"b": np.nan}, inplace=True) + return_value = res2.replace(regex="a", value={"b": np.nan}, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} ) @@ -483,9 +536,13 @@ def test_regex_replace_dict_nested(self, mix_abc): res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True) res2 = dfmix.copy() res4 = dfmix.copy() - res2.replace({"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True) + return_value = res2.replace( + {"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True + ) + assert return_value is None res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}}) - res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) + return_value = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} ) @@ -519,8 +576,14 @@ def test_regex_replace_list_to_scalar(self, mix_abc): res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) res2 = df.copy() res3 = df.copy() - res2.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True) - res3.replace(regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True) + return_value = res2.replace( + [r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True + ) + assert return_value is None + return_value = res3.replace( + 
regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True + ) + assert return_value is None tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) tm.assert_frame_equal(res3, expec) @@ -530,9 +593,11 @@ def test_regex_replace_str_to_numeric(self, mix_abc): df = DataFrame(mix_abc) res = df.replace(r"\s*\.\s*", 0, regex=True) res2 = df.copy() - res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) + return_value = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) + assert return_value is None res3 = df.copy() - res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) + return_value = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) + assert return_value is None expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]}) tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) @@ -542,9 +607,11 @@ def test_regex_replace_regex_list_to_numeric(self, mix_abc): df = DataFrame(mix_abc) res = df.replace([r"\s*\.\s*", "b"], 0, regex=True) res2 = df.copy() - res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) + return_value = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) + assert return_value is None res3 = df.copy() - res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) + return_value = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]} ) @@ -558,9 +625,11 @@ def test_regex_replace_series_of_regexes(self, mix_abc): s2 = Series({"b": np.nan}) res = df.replace(s1, s2, regex=True) res2 = df.copy() - res2.replace(s1, s2, inplace=True, regex=True) + return_value = res2.replace(s1, s2, inplace=True, regex=True) + assert return_value is None res3 = df.copy() - res3.replace(regex=s1, value=s2, inplace=True) + return_value = res3.replace(regex=s1, value=s2, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", np.nan, 
np.nan], "c": mix_abc["c"]} ) @@ -714,7 +783,8 @@ def test_replace_mixed(self, float_string_frame): result = df.replace(0, 0.5) tm.assert_frame_equal(result, expected) - df.replace(0, 0.5, inplace=True) + return_value = df.replace(0, 0.5, inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) # int block splitting @@ -942,7 +1012,8 @@ def test_replace_input_formats_listlike(self): result = df.replace(to_rep, values) expected = df.copy() for i in range(len(to_rep)): - expected.replace(to_rep[i], values[i], inplace=True) + return_value = expected.replace(to_rep[i], values[i], inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) msg = r"Replacement lists must match in length\. Expecting 3 got 2" @@ -969,7 +1040,8 @@ def test_replace_input_formats_scalar(self): result = df.replace(to_rep, -1) expected = df.copy() for i in range(len(to_rep)): - expected.replace(to_rep[i], -1, inplace=True) + return_value = expected.replace(to_rep[i], -1, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_replace_limit(self): @@ -1321,7 +1393,8 @@ def test_categorical_replace_with_dict(self, replace_dict, final_data): with pytest.raises(AssertionError, match=msg): # ensure non-inplace call does not affect original tm.assert_frame_equal(df, expected) - df.replace(replace_dict, 3, inplace=True) + return_value = df.replace(replace_dict, 3, inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index cf0bbe144caa5..da4bfa9be4881 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -119,7 +119,8 @@ def test_reset_index(self, float_frame): # test resetting in place df = float_frame.copy() resetted = float_frame.reset_index() - df.reset_index(inplace=True) + return_value = 
df.reset_index(inplace=True) + assert return_value is None tm.assert_frame_equal(df, resetted, check_names=False) df = float_frame.reset_index().set_index(["index", "A", "B"]) @@ -137,7 +138,8 @@ def test_reset_index_name(self): ) assert df.reset_index().index.name is None assert df.reset_index(drop=True).index.name is None - df.reset_index(inplace=True) + return_value = df.reset_index(inplace=True) + assert return_value is None assert df.index.name is None def test_reset_index_level(self): diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 5f62697cc3e43..ebe7eabd53b46 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -137,7 +137,8 @@ def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys): if inplace: result = df.copy() - result.set_index(keys, drop=drop, inplace=True) + return_value = result.set_index(keys, drop=drop, inplace=True) + assert return_value is None else: result = df.set_index(keys, drop=drop) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 543d87485d3c4..5216c3be116e0 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -218,25 +218,29 @@ def test_sort_index_inplace(self): unordered = frame.loc[[3, 2, 4, 1]] a_id = id(unordered["A"]) df = unordered.copy() - df.sort_index(inplace=True) + return_value = df.sort_index(inplace=True) + assert return_value is None expected = frame tm.assert_frame_equal(df, expected) assert a_id != id(df["A"]) df = unordered.copy() - df.sort_index(ascending=False, inplace=True) + return_value = df.sort_index(ascending=False, inplace=True) + assert return_value is None expected = frame[::-1] tm.assert_frame_equal(df, expected) # axis=1 unordered = frame.loc[:, ["D", "B", "C", "A"]] df = unordered.copy() - df.sort_index(axis=1, inplace=True) + return_value = 
df.sort_index(axis=1, inplace=True) + assert return_value is None expected = frame tm.assert_frame_equal(df, expected) df = unordered.copy() - df.sort_index(axis=1, ascending=False, inplace=True) + return_value = df.sort_index(axis=1, ascending=False, inplace=True) + assert return_value is None expected = frame.iloc[:, ::-1] tm.assert_frame_equal(df, expected) @@ -589,7 +593,8 @@ def test_sort_index_level2(self): # inplace rs = frame.copy() - rs.sort_index(level=0, inplace=True) + return_value = rs.sort_index(level=0, inplace=True) + assert return_value is None tm.assert_frame_equal(rs, frame.sort_index(level=0)) def test_sort_index_level_large_cardinality(self): diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index 1275da01eace9..c60e7e3b1bdb6 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -77,22 +77,28 @@ def test_sort_values_inplace(self): ) sorted_df = frame.copy() - sorted_df.sort_values(by="A", inplace=True) + return_value = sorted_df.sort_values(by="A", inplace=True) + assert return_value is None expected = frame.sort_values(by="A") tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by=1, axis=1, inplace=True) + return_value = sorted_df.sort_values(by=1, axis=1, inplace=True) + assert return_value is None expected = frame.sort_values(by=1, axis=1) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by="A", ascending=False, inplace=True) + return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True) + assert return_value is None expected = frame.sort_values(by="A", ascending=False) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by=["A", "B"], ascending=False, inplace=True) + return_value = sorted_df.sort_values( + by=["A", "B"], ascending=False, inplace=True + ) + assert return_value is None 
expected = frame.sort_values(by=["A", "B"], ascending=False) tm.assert_frame_equal(sorted_df, expected) @@ -544,17 +550,24 @@ def test_sort_values_inplace_key(self, sort_by_key): ) sorted_df = frame.copy() - sorted_df.sort_values(by="A", inplace=True, key=sort_by_key) + return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key) + assert return_value is None expected = frame.sort_values(by="A", key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by=1, axis=1, inplace=True, key=sort_by_key) + return_value = sorted_df.sort_values( + by=1, axis=1, inplace=True, key=sort_by_key + ) + assert return_value is None expected = frame.sort_values(by=1, axis=1, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by="A", ascending=False, inplace=True, key=sort_by_key) + return_value = sorted_df.sort_values( + by="A", ascending=False, inplace=True, key=sort_by_key + ) + assert return_value is None expected = frame.sort_values(by="A", ascending=False, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index d5554860c034d..c9fec3215d57f 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -64,7 +64,8 @@ def test_consolidate(self, float_frame): float_frame["F"] = 8.0 assert len(float_frame._mgr.blocks) == 3 - float_frame._consolidate(inplace=True) + return_value = float_frame._consolidate(inplace=True) + assert return_value is None assert len(float_frame._mgr.blocks) == 1 def test_consolidate_inplace(self, float_frame): diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 98a2a33822e3b..628b955a1de92 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -413,7 +413,8 @@ def test_date_index_query(self): df 
= DataFrame(np.random.randn(n, 3)) df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] tm.assert_frame_equal(res, expec) @@ -425,7 +426,8 @@ def test_date_index_query_with_NaT(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.iloc[0, 0] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] tm.assert_frame_equal(res, expec) @@ -438,7 +440,8 @@ def test_date_index_query_with_NaT_duplicates(self): d["dates3"] = date_range("1/1/2014", periods=n) df = DataFrame(d) df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)] tm.assert_frame_equal(res, expec) @@ -759,7 +762,8 @@ def test_date_index_query(self): df = DataFrame(np.random.randn(n, 3)) df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query( "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser ) @@ -773,7 +777,8 @@ def test_date_index_query_with_NaT(self): df["dates1"] = date_range("1/1/2012", 
periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.iloc[0, 0] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query( "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser ) @@ -787,7 +792,8 @@ def test_date_index_query_with_NaT_duplicates(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None msg = r"'BoolOp' nodes are not implemented" with pytest.raises(NotImplementedError, match=msg): df.query("index < 20130101 < dates3", engine=engine, parser=parser) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 1634baacf6d6e..6a8f1e7c1aca2 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -473,7 +473,8 @@ def test_stack_ints(self): ) df_named = df.copy() - df_named.columns.set_names(range(3), inplace=True) + return_value = df_named.columns.set_names(range(3), inplace=True) + assert return_value is None tm.assert_frame_equal( df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 2b7b3af8f4705..db7347bb863a5 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -570,7 +570,8 @@ def test_to_csv_headers(self): from_df.to_csv(path, index=False, header=["X", "Y"]) recons = self.read_csv(path) - recons.reset_index(inplace=True) + return_value = recons.reset_index(inplace=True) + assert return_value is None tm.assert_frame_equal(to_df, recons) def test_to_csv_multiindex(self, float_frame, datetime_frame):
verify we return None for all inplace calls in tests/frame related: https://github.com/pandas-dev/pandas/pull/35230
https://api.github.com/repos/pandas-dev/pandas/pulls/35232
2020-07-11T15:57:46Z
2020-07-12T12:05:03Z
2020-07-12T12:05:03Z
2020-07-12T12:05:15Z
BUG: Inconsistent behavior in Index.difference
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 90534c00df621..28e30e95e8bf1 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -953,6 +953,7 @@ Numeric - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`) - Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`) - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) +- Bug in :meth:`Index.difference` incorrect results when comparing a :class:`Float64Index` and object :class:`Index` (:issue:`35217`) - Bug in :class:`DataFrame` reductions (e.g. ``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`) Conversion diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 5020a25c88ff4..731907993d08f 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -400,28 +400,6 @@ def _format_native_types( ) return formatter.get_result_as_array() - def equals(self, other) -> bool: - """ - Determines if two Index objects contain the same elements. 
- """ - if self is other: - return True - - if not isinstance(other, Index): - return False - - # need to compare nans locations and make sure that they are the same - # since nans don't compare equal this is a bit tricky - try: - if not isinstance(other, Float64Index): - other = self._constructor(other) - if not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape: - return False - left, right = self._values, other._values - return ((left == right) | (self._isnan & other._isnan)).all() - except (TypeError, ValueError): - return False - def __contains__(self, other: Any) -> bool: hash(other) if super().__contains__(other): diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 33de0800658f2..a7c5734ef9b02 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -239,6 +239,19 @@ def test_equals_numeric(self): i2 = Float64Index([1.0, np.nan]) assert i.equals(i2) + @pytest.mark.parametrize( + "other", + ( + Int64Index([1, 2]), + Index([1.0, 2.0], dtype=object), + Index([1, 2], dtype=object), + ), + ) + def test_equals_numeric_other_index_type(self, other): + i = Float64Index([1.0, 2.0]) + assert i.equals(other) + assert other.equals(i) + @pytest.mark.parametrize( "vals", [ @@ -635,3 +648,27 @@ def test_uint_index_does_not_convert_to_float64(): tm.assert_index_equal(result.index, expected) tm.assert_equal(result, series[:3]) + + +def test_float64_index_equals(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = pd.Index([1.0, 2, 3]) + string_index = pd.Index(["1", "2", "3"]) + + result = float_index.equals(string_index) + assert result is False + + result = string_index.equals(float_index) + assert result is False + + +def test_float64_index_difference(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = pd.Index([1.0, 2, 3]) + string_index = pd.Index(["1", "2", "3"]) + + result = float_index.difference(string_index) + 
tm.assert_index_equal(result, float_index) + + result = string_index.difference(float_index) + tm.assert_index_equal(result, string_index)
- [ ] closes #35217 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35231
2020-07-11T13:23:39Z
2020-07-16T09:55:15Z
2020-07-16T09:55:14Z
2020-07-16T09:57:00Z
TST verify return none inplace in tests/indexing
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index bfff58d05007f..17ac2307b9da6 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1208,8 +1208,10 @@ def test_constructor_single_row(self): data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])] result = DataFrame(data) - expected = DataFrame.from_dict(dict(zip([0], data)), orient="index") - tm.assert_frame_equal(result, expected.reindex(result.index)) + expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex( + result.index + ) + tm.assert_frame_equal(result, expected) def test_constructor_ordered_dict_preserve_order(self): # see gh-13304 diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py index ea4453b8dd6eb..be193e0854d8d 100644 --- a/pandas/tests/indexing/multiindex/test_indexing_slow.py +++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py @@ -34,12 +34,15 @@ def validate(mi, df, key): right = df[mask].copy() if i + 1 != len(key): # partial key - right.drop(cols[: i + 1], axis=1, inplace=True) - right.set_index(cols[i + 1 : -1], inplace=True) + return_value = right.drop(cols[: i + 1], axis=1, inplace=True) + assert return_value is None + return_value = right.set_index(cols[i + 1 : -1], inplace=True) + assert return_value is None tm.assert_frame_equal(mi.loc[key[: i + 1]], right) else: # full key - right.set_index(cols[:-1], inplace=True) + return_value = right.set_index(cols[:-1], inplace=True) + assert return_value is None if len(right) == 1: # single hit right = Series( right["jolia"].values, name=right.index[0], index=["jolia"] diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py index 01b0b392d52a3..abf989324e4a5 100644 --- a/pandas/tests/indexing/multiindex/test_ix.py +++ b/pandas/tests/indexing/multiindex/test_ix.py @@ -35,7 +35,8 @@ def 
test_loc_general(self): tm.assert_frame_equal(df.loc[key], df.iloc[2:]) # this is ok - df.sort_index(inplace=True) + return_value = df.sort_index(inplace=True) + assert return_value is None res = df.loc[key] # col has float dtype, result should be Float64Index diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py index fdeb3ce95b0bb..572cb9da405d1 100644 --- a/pandas/tests/indexing/multiindex/test_sorted.py +++ b/pandas/tests/indexing/multiindex/test_sorted.py @@ -43,8 +43,10 @@ def test_frame_getitem_not_sorted2(self, key): df2 = df.set_index(["col1", "col2"]) df2_original = df2.copy() - df2.index.set_levels(["b", "d", "a"], level="col1", inplace=True) - df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True) + return_value = df2.index.set_levels(["b", "d", "a"], level="col1", inplace=True) + assert return_value is None + return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True) + assert return_value is None assert not df2.index.is_lexsorted() assert not df2.index.is_monotonic diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py index ff748d755c063..b807795b9c309 100644 --- a/pandas/tests/indexing/multiindex/test_xs.py +++ b/pandas/tests/indexing/multiindex/test_xs.py @@ -237,9 +237,11 @@ def test_series_getitem_multiindex_xs_by_label(): [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")] ) s = Series([1, 2, 3, 4], index=idx) - s.index.set_names(["L1", "L2"], inplace=True) + return_value = s.index.set_names(["L1", "L2"], inplace=True) + assert return_value is None expected = Series([1, 3], index=["a", "b"]) - expected.index.set_names(["L1"], inplace=True) + return_value = expected.index.set_names(["L1"], inplace=True) + assert return_value is None result = s.xs("one", level="L2") tm.assert_series_equal(result, expected)
verify we return none for all inplace calls in tests/indexing related: https://github.com/pandas-dev/pandas/pull/35210
https://api.github.com/repos/pandas-dev/pandas/pulls/35230
2020-07-11T13:14:55Z
2020-07-13T20:36:05Z
2020-07-13T20:36:05Z
2020-07-13T20:36:08Z
Place the calculation of mask prior to the calls of comp in replace_list to improve performance
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index c82670106d3b6..d5947726af7fd 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -596,18 +596,22 @@ def replace_list( # figure out our mask apriori to avoid repeated replacements values = self.as_array() - def comp(s, regex=False): + def comp(s: Scalar, mask: np.ndarray, regex: bool = False): """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): - return isna(values) + return ~mask s = com.maybe_box_datetimelike(s) - return _compare_or_regex_search(values, s, regex) + return _compare_or_regex_search(values, s, regex, mask) - masks = [comp(s, regex) for s in src_list] + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + mask = ~isna(values) + + masks = [comp(s, mask, regex) for s in src_list] result_blocks = [] src_len = len(src_list) - 1 @@ -1895,7 +1899,7 @@ def _merge_blocks( def _compare_or_regex_search( - a: ArrayLike, b: Scalar, regex: bool = False + a: ArrayLike, b: Scalar, regex: bool = False, mask: Optional[ArrayLike] = None ) -> Union[ArrayLike, bool]: """ Compare two array_like inputs of the same shape or two scalar values @@ -1908,6 +1912,7 @@ def _compare_or_regex_search( a : array_like b : scalar regex : bool, default False + mask : array_like or None (default) Returns ------- @@ -1941,7 +1946,7 @@ def _check_comparison_types( ) # GH#32621 use mask to avoid comparing to NAs - if isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): + if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): mask = np.reshape(~(isna(a)), a.shape) if isinstance(a, np.ndarray): a = a[mask] @@ -1953,7 +1958,7 @@ def _check_comparison_types( result = op(a) - if isinstance(result, np.ndarray): + if isinstance(result, np.ndarray) and mask is not None: # The shape of the mask can differ to that 
of the result # since we may compare only a subset of a's or b's elements tmp = np.zeros(mask.shape, dtype=np.bool_)
- [X] closes #33920 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35229
2020-07-11T13:06:40Z
2020-07-15T12:27:23Z
2020-07-15T12:27:22Z
2020-07-16T07:08:44Z
TST: added test for groupby/apply timezone-aware with copy
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 1945647ced08f..aa10f44670361 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -995,3 +995,18 @@ def test_apply_function_with_indexing_return_column(): result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean()) expected = DataFrame({"foo1": ["one", "three", "two"], "foo2": [3.0, 4.0, 4.0]}) tm.assert_frame_equal(result, expected) + + +def test_apply_with_timezones_aware(): + # GH: 27212 + + dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2 + index_no_tz = pd.DatetimeIndex(dates) + index_tz = pd.DatetimeIndex(dates, tz="UTC") + df1 = pd.DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz}) + df2 = pd.DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz}) + + result1 = df1.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + + tm.assert_frame_equal(result1, result2)
- [x] closes #27212 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry added test for groupby/apply timezone-aware with copy
https://api.github.com/repos/pandas-dev/pandas/pulls/35225
2020-07-11T01:57:18Z
2020-07-11T04:51:37Z
2020-07-11T04:51:37Z
2020-07-11T11:11:31Z
DEPR: DataFrame.lookup
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index 74abbc9503db0..b11baad1e3eb5 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1480,17 +1480,27 @@ default value. s.get('a') # equivalent to s['a'] s.get('x', default=-1) -The :meth:`~pandas.DataFrame.lookup` method -------------------------------------------- +.. _indexing.lookup: + +Looking up values by index/column labels +---------------------------------------- Sometimes you want to extract a set of values given a sequence of row labels -and column labels, and the ``lookup`` method allows for this and returns a -NumPy array. For instance: +and column labels, this can be achieved by ``DataFrame.melt`` combined by filtering the corresponding +rows with ``DataFrame.loc``. For instance: .. ipython:: python - dflookup = pd.DataFrame(np.random.rand(20, 4), columns = ['A', 'B', 'C', 'D']) - dflookup.lookup(list(range(0, 10, 2)), ['B', 'C', 'A', 'B', 'D']) + df = pd.DataFrame({'col': ["A", "A", "B", "B"], + 'A': [80, 23, np.nan, 22], + 'B': [80, 55, 76, 67]}) + df + melt = df.melt('col') + melt = melt.loc[melt['col'] == melt['variable'], 'value'] + melt.reset_index(drop=True) + +Formerly this could be achieved with the dedicated ``DataFrame.lookup`` method +which was deprecated in version 1.2.0. .. _indexing.class: diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 8b18b56929acd..41cf1218261ba 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -209,6 +209,7 @@ Deprecations - Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`) - Deprecated parameter ``dtype`` in :~meth:`Index.copy` on method all index classes. 
Use the :meth:`Index.astype` method instead for changing dtype(:issue:`35853`) - Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`) +- :meth:`DataFrame.lookup` is deprecated and will be removed in a future version, use :meth:`DataFrame.melt` and :meth:`DataFrame.loc` instead (:issue:`18682`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 56dc5e54e1d59..319003e50999f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3843,10 +3843,15 @@ def _series(self): def lookup(self, row_labels, col_labels) -> np.ndarray: """ Label-based "fancy indexing" function for DataFrame. - Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. + .. deprecated:: 1.2.0 + DataFrame.lookup is deprecated, + use DataFrame.melt and DataFrame.loc instead. + For an example see :meth:`~pandas.DataFrame.lookup` + in the user guide. + Parameters ---------- row_labels : sequence @@ -3859,6 +3864,14 @@ def lookup(self, row_labels, col_labels) -> np.ndarray: numpy.ndarray The found values. """ + msg = ( + "The 'lookup' method is deprecated and will be" + "removed in a future version." + "You can use DataFrame.melt and DataFrame.loc" + "as a substitute." 
+ ) + warnings.warn(msg, FutureWarning, stacklevel=2) + n = len(row_labels) if n != len(col_labels): raise ValueError("Row labels must have same size as column labels") diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index e4549dfb3e68d..b947be705a329 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1340,7 +1340,8 @@ def test_lookup_float(self, float_frame): df = float_frame rows = list(df.index) * len(df.columns) cols = list(df.columns) * len(df.index) - result = df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + result = df.lookup(rows, cols) expected = np.array([df.loc[r, c] for r, c in zip(rows, cols)]) tm.assert_numpy_array_equal(result, expected) @@ -1349,7 +1350,8 @@ def test_lookup_mixed(self, float_string_frame): df = float_string_frame rows = list(df.index) * len(df.columns) cols = list(df.columns) * len(df.index) - result = df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + result = df.lookup(rows, cols) expected = np.array( [df.loc[r, c] for r, c in zip(rows, cols)], dtype=np.object_ @@ -1365,7 +1367,8 @@ def test_lookup_bool(self): "mask_c": [False, True, False, True], } ) - df["mask"] = df.lookup(df.index, "mask_" + df["label"]) + with tm.assert_produces_warning(FutureWarning): + df["mask"] = df.lookup(df.index, "mask_" + df["label"]) exp_mask = np.array( [df.loc[r, c] for r, c in zip(df.index, "mask_" + df["label"])] @@ -1376,13 +1379,16 @@ def test_lookup_bool(self): def test_lookup_raises(self, float_frame): with pytest.raises(KeyError, match="'One or more row labels was not found'"): - float_frame.lookup(["xyz"], ["A"]) + with tm.assert_produces_warning(FutureWarning): + float_frame.lookup(["xyz"], ["A"]) with pytest.raises(KeyError, match="'One or more column labels was not found'"): - float_frame.lookup([float_frame.index[0]], ["xyz"]) + with 
tm.assert_produces_warning(FutureWarning): + float_frame.lookup([float_frame.index[0]], ["xyz"]) with pytest.raises(ValueError, match="same size"): - float_frame.lookup(["a", "b", "c"], ["a"]) + with tm.assert_produces_warning(FutureWarning): + float_frame.lookup(["a", "b", "c"], ["a"]) def test_lookup_requires_unique_axes(self): # GH#33041 raise with a helpful error message @@ -1393,14 +1399,17 @@ def test_lookup_requires_unique_axes(self): # homogeneous-dtype case with pytest.raises(ValueError, match="requires unique index and columns"): - df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + df.lookup(rows, cols) with pytest.raises(ValueError, match="requires unique index and columns"): - df.T.lookup(cols, rows) + with tm.assert_produces_warning(FutureWarning): + df.T.lookup(cols, rows) # heterogeneous dtype df["B"] = 0 with pytest.raises(ValueError, match="requires unique index and columns"): - df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + df.lookup(rows, cols) def test_set_value(self, float_frame): for idx in float_frame.index: @@ -2232,3 +2241,12 @@ def test_object_casting_indexing_wraps_datetimelike(): assert blk.dtype == "m8[ns]" # we got the right block val = blk.iget((0, 0)) assert isinstance(val, pd.Timedelta) + + +def test_lookup_deprecated(): + # GH18262 + df = pd.DataFrame( + {"col": ["A", "A", "B", "B"], "A": [80, 23, np.nan, 22], "B": [80, 55, 76, 67]} + ) + with tm.assert_produces_warning(FutureWarning): + df.lookup(df.index, df["col"])
- [x] xref #18262 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35224
2020-07-11T00:05:41Z
2020-09-17T02:39:41Z
2020-09-17T02:39:41Z
2021-03-01T10:04:25Z
Fixing a confused method name in 02_read_write.rst
diff --git a/doc/source/getting_started/intro_tutorials/02_read_write.rst b/doc/source/getting_started/intro_tutorials/02_read_write.rst index 12fa2a1e094d6..c6c6bfefc4303 100644 --- a/doc/source/getting_started/intro_tutorials/02_read_write.rst +++ b/doc/source/getting_started/intro_tutorials/02_read_write.rst @@ -151,7 +151,7 @@ named *passengers* instead of the default *Sheet1*. By setting </li> </ul> -The equivalent read function :meth:`~DataFrame.to_excel` will reload the data to a +The equivalent read function :meth:`~DataFrame.read_excel` will reload the data to a ``DataFrame``: .. ipython:: python
Fixed the confused method name. It should be `read_excel` based on the context, but `to_excel` was provided instead. Very simple PR for a simple fix to the documentation.
https://api.github.com/repos/pandas-dev/pandas/pulls/35222
2020-07-10T22:01:17Z
2020-07-10T22:22:51Z
2020-07-10T22:22:51Z
2020-07-10T22:22:55Z
ASV: asvs for normalize functions
diff --git a/asv_bench/benchmarks/tslibs/normalize.py b/asv_bench/benchmarks/tslibs/normalize.py new file mode 100644 index 0000000000000..7d4e0556f4d96 --- /dev/null +++ b/asv_bench/benchmarks/tslibs/normalize.py @@ -0,0 +1,32 @@ +try: + from pandas._libs.tslibs import normalize_i8_timestamps, is_date_array_normalized +except ImportError: + from pandas._libs.tslibs.conversion import ( + normalize_i8_timestamps, + is_date_array_normalized, + ) + +import pandas as pd + +from .tslib import _sizes, _tzs + + +class Normalize: + params = [ + _sizes, + _tzs, + ] + param_names = ["size", "tz"] + + def setup(self, size, tz): + # use an array that will have is_date_array_normalized give True, + # so we do not short-circuit early. + dti = pd.date_range("2016-01-01", periods=10, tz=tz).repeat(size // 10) + self.i8data = dti.asi8 + + def time_normalize_i8_timestamps(self, size, tz): + normalize_i8_timestamps(self.i8data, tz) + + def time_is_date_array_normalized(self, size, tz): + # TODO: cases with different levels of short-circuiting + is_date_array_normalized(self.i8data, tz)
With this, we have pretty good coverage for everything in tslibs.vectorized
https://api.github.com/repos/pandas-dev/pandas/pulls/35221
2020-07-10T21:34:33Z
2020-07-10T22:20:27Z
2020-07-10T22:20:27Z
2020-07-10T23:38:37Z
TYPING/DOC: Move custom type to _typing and add whatsnew
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index d3724112ef455..62fc8c1c5c09a 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -332,6 +332,7 @@ Other enhancements - :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) +- :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) .. --------------------------------------------------------------------------- diff --git a/pandas/_typing.py b/pandas/_typing.py index 4892abc5f6f51..8e98833ad37f7 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -96,3 +96,11 @@ # DataFrame::sort_index, among others ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]] IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]] + +# types of `func` kwarg for DataFrame.aggregate and Series.aggregate +AggFuncTypeBase = Union[Callable, str] +AggFuncType = Union[ + AggFuncTypeBase, + List[AggFuncTypeBase], + Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], +] diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 16c4a9f862d79..891048ae82dfd 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -17,7 +17,7 @@ Union, ) -from pandas._typing import Label +from pandas._typing import AggFuncType, Label from pandas.core.dtypes.common import is_dict_like, is_list_like @@ -26,14 +26,6 @@ from pandas.core.indexes.api import Index from pandas.core.series import FrameOrSeriesUnion, Series 
-# types of `func` kwarg for DataFrame.aggregate and Series.aggregate -AggFuncTypeBase = Union[Callable, str] -AggFuncType = Union[ - AggFuncTypeBase, - List[AggFuncTypeBase], - Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], -] - def reconstruct_func( func: Optional[AggFuncType], **kwargs,
followup of #29116 details see : https://github.com/pandas-dev/pandas/pull/29116#discussion_r453033204
https://api.github.com/repos/pandas-dev/pandas/pulls/35220
2020-07-10T19:46:06Z
2020-07-11T02:10:20Z
2020-07-11T02:10:20Z
2020-07-11T02:10:28Z
TST add corner cases in test_constructors
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index ab4f7781467e7..ef5c2d539f912 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1178,6 +1178,13 @@ def test_constructor_list_of_odicts(self): expected = DataFrame(index=[0]) tm.assert_frame_equal(result, expected) + def test_constructor_single_row(self): + data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])] + + result = DataFrame(data) + expected = DataFrame.from_dict(dict(zip([0], data)), orient="index") + tm.assert_frame_equal(result, expected.reindex(result.index)) + def test_constructor_ordered_dict_preserve_order(self): # see gh-13304 expected = DataFrame([[2, 1]], columns=["b", "a"]) @@ -1493,16 +1500,17 @@ def test_from_dict_columns_parameter(self): ) @pytest.mark.parametrize( - "data_dict, keys", + "data_dict, keys, orient", [ - ([{("a",): 1}, {("a",): 2}], [("a",)]), - ([OrderedDict([(("a",), 1), (("b",), 2)])], [("a",), ("b",)]), - ([{("a", "b"): 1}], [("a", "b")]), + ({}, [], "index"), + ([{("a",): 1}, {("a",): 2}], [("a",)], "columns"), + ([OrderedDict([(("a",), 1), (("b",), 2)])], [("a",), ("b",)], "columns"), + ([{("a", "b"): 1}], [("a", "b")], "columns"), ], ) - def test_constructor_from_dict_tuples(self, data_dict, keys): + def test_constructor_from_dict_tuples(self, data_dict, keys, orient): # GH 16769 - df = DataFrame.from_dict(data_dict) + df = DataFrame.from_dict(data_dict, orient) result = df.columns expected = Index(keys, dtype="object", tupleize_cols=False)
add two more corner cases to frame/test_constructors.
https://api.github.com/repos/pandas-dev/pandas/pulls/35216
2020-07-10T16:18:31Z
2020-07-11T10:44:57Z
2020-07-11T10:44:56Z
2020-07-11T10:45:09Z
REF: remove IntervalIndex.copy
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index abc82dd3c73f5..d16eb230b9f33 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -460,17 +460,6 @@ def __reduce__(self): d.update(self._get_attributes_dict()) return _new_IntervalIndex, (type(self), d), None - @Appender(_index_shared_docs["copy"]) - def copy(self, deep=False, name=None): - array = self._data - if deep: - array = array.copy() - attributes = self._get_attributes_dict() - if name is not None: - attributes.update(name=name) - - return self._simple_new(array, **attributes) - @Appender(_index_shared_docs["astype"]) def astype(self, dtype, copy=True): with rewrite_exception("IntervalArray", type(self).__name__):
cc @jschendel the existing behavior treats `name` differently than pretty much all our other copy methods. Was there a reason for that? It doesn't appear to be tested.
https://api.github.com/repos/pandas-dev/pandas/pulls/30627
2020-01-02T21:06:42Z
2020-01-03T00:48:24Z
2020-01-03T00:48:24Z
2020-01-03T00:52:19Z
REF: delegate more IntervalIndex methods
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b2919b45fd6a7..cae9fa949f711 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -201,7 +201,14 @@ def func(intvidx_self, other, sort=False): ) @accessor.delegate_names( delegate=IntervalArray, - accessors=["__array__", "overlaps", "contains", "__len__", "set_closed"], + accessors=[ + "__array__", + "overlaps", + "contains", + "__len__", + "set_closed", + "to_tuples", + ], typ="method", overwrite=True, ) @@ -393,25 +400,6 @@ def __contains__(self, key) -> bool: except KeyError: return False - @Appender( - _interval_shared_docs["to_tuples"] - % dict( - return_type="Index", - examples=""" - Examples - -------- - >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3]) - >>> idx.to_tuples() - Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object') - >>> idx.to_tuples(na_tuple=False) - Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') - """, - ) - ) - def to_tuples(self, na_tuple=True): - tuples = self._data.to_tuples(na_tuple=na_tuple) - return Index(tuples) - @cache_readonly def _multiindex(self): return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"]) @@ -1004,8 +992,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): result = self._data.take( indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs ) - attributes = self._get_attributes_dict() - return self._simple_new(result, **attributes) + return self._shallow_copy(result) def __getitem__(self, value): result = self._data[value] @@ -1206,7 +1193,9 @@ def _delegate_method(self, name, *args, **kwargs): res = method(*args, **kwargs) if is_scalar(res) or name in self._raw_inherit: return res - return type(self)(res, name=self.name) + if isinstance(res, IntervalArray): + return type(self)._simple_new(res, name=self.name) + return Index(res) IntervalIndex._add_logical_methods_disabled()
cc @jschendel is the usage of _simple_new instead of _shallow_copy `take` important? Should it always be the case that `idx.close == idx._data.closed`?
https://api.github.com/repos/pandas-dev/pandas/pulls/30626
2020-01-02T20:41:34Z
2020-01-03T09:59:26Z
2020-01-03T09:59:26Z
2020-01-06T15:38:22Z
TST: Adding test to concat where copy=False for ExtensionArrays
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index 89c9ed3674a66..ec21898852888 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -94,6 +94,19 @@ def test_concat_columns(self, data, na_value): result = pd.concat([df1["A"], df2["B"]], axis=1) self.assert_frame_equal(result, expected) + def test_concat_extension_arrays_copy_false(self, data, na_value): + # GH 20756 + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"B": data[3:7]}) + expected = pd.DataFrame( + { + "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), + "B": data[3:7], + } + ) + result = pd.concat([df1, df2], axis=1, copy=False) + self.assert_frame_equal(result, expected) + def test_align(self, data, na_value): a = data[:3] b = data[2:5] diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 6ebe71e173ec2..854bb8adcb100 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -132,6 +132,10 @@ def test_concat_columns(self, data, na_value): self._check_unsupported(data) super().test_concat_columns(data, na_value) + def test_concat_extension_arrays_copy_false(self, data, na_value): + self._check_unsupported(data) + super().test_concat_extension_arrays_copy_false(data, na_value) + def test_align(self, data, na_value): self._check_unsupported(data) super().test_align(data, na_value)
The test ensures that ExtensionArrays are correctly constructed when concat(copy=False) is used. - [x] closes #20756 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30625
2020-01-02T20:03:02Z
2020-01-04T18:21:33Z
2020-01-04T18:21:33Z
2020-01-04T18:21:37Z
whatsnew fixups
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 59644440149ff..088e08d2fbd15 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -174,7 +174,7 @@ You can use the alias ``"boolean"`` as well. Using Numba in ``rolling.apply`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We've added an ``engine`` keyword to :meth:`~Rolling.apply` that allows the user to execute the +We've added an ``engine`` keyword to :meth:`~core.window.rolling.Rolling.apply` that allows the user to execute the routine using `Numba <https://numba.pydata.org/>`__ instead of Cython. Using the Numba engine can yield significant performance gains if the apply function can operate on numpy arrays and the data set is larger (1 million rows or greater). For more details, see @@ -298,45 +298,6 @@ New repr for :class:`~pandas.arrays.IntervalArray` pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)]) -All :class:`SeriesGroupBy` aggregation methods now respect the ``observed`` keyword -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The following methods now also correctly output values for unobserved categories when called through ``groupby(..., observed=False)`` (:issue:`17605`) - -- :meth:`SeriesGroupBy.count` -- :meth:`SeriesGroupBy.size` -- :meth:`SeriesGroupBy.nunique` -- :meth:`SeriesGroupBy.nth` - -.. ipython:: python - - df = pd.DataFrame({ - "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), - "cat_2": pd.Categorical(list("AB") * 2, categories=list("ABC")), - "value": [0.1] * 4, - }) - df - - -*pandas 0.25.x* - -.. code-block:: ipython - - In [2]: df.groupby(["cat_1", "cat_2"], observed=False)["value"].count() - Out[2]: - cat_1 cat_2 - A A 1 - B 1 - B A 1 - B 1 - Name: value, dtype: int64 - - -*pandas 1.0.0* - -.. 
ipython:: python - - df.groupby(["cat_1", "cat_2"], observed=False)["value"].count() - :meth:`pandas.array` inference changes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -540,7 +501,7 @@ Other API changes ^^^^^^^^^^^^^^^^^ - Bumped the minimum supported version of ``s3fs`` from 0.0.8 to 0.3.0 (:issue:`28616`) -- :class:`pandas.core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`) +- :class:`core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`) - :meth:`pandas.api.types.infer_dtype` will now return "integer-na" for integer and ``np.nan`` mix (:issue:`27283`) - :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`) - In order to improve tab-completion, Pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``). @@ -645,7 +606,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. 
- :meth:`DataFrame.to_excel` and :meth:`Series.to_excel` with non-existent columns will no longer reindex (:issue:`17295`) - Removed the previously deprecated keyword "join_axes" from :func:`concat`; use ``reindex_like`` on the result instead (:issue:`22318`) - Removed the previously deprecated keyword "by" from :meth:`DataFrame.sort_index`, use :meth:`DataFrame.sort_values` instead (:issue:`10726`) -- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`DataFrameGroupBy.aggregate`, :meth:`SeriesGroupBy.aggregate`, :meth:`Rolling.aggregate` (:issue:`18529`) +- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`core.groupby.DataFrameGroupBy.aggregate`, :meth:`core.groupby.SeriesGroupBy.aggregate`, :meth:`core.window.rolling.Rolling.aggregate` (:issue:`18529`) - Passing ``datetime64`` data to :class:`TimedeltaIndex` or ``timedelta64`` data to ``DatetimeIndex`` now raises ``TypeError`` (:issue:`23539`, :issue:`23937`) - Passing ``int64`` values to :class:`DatetimeIndex` and a timezone now interprets the values as nanosecond timestamps in UTC, not wall times in the given timezone (:issue:`24559`) - A tuple passed to :meth:`DataFrame.groupby` is now exclusively treated as a single key (:issue:`18314`) @@ -685,7 +646,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. 
- Removed the previously deprecated ``FrozenNDArray`` class in ``pandas.core.indexes.frozen`` (:issue:`29335`) - Removed the previously deprecated keyword "nthreads" from :func:`read_feather`, use "use_threads" instead (:issue:`23053`) - Removed :meth:`Index.is_lexsorted_for_tuple` (:issue:`29305`) -- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`DataFrameGroupBy.aggregate`, :meth:`SeriesGroupBy.aggregate`, :meth:`Rolling.aggregate` (:issue:`29608`) +- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`core.groupby.DataFrameGroupBy.aggregate`, :meth:`core.groupby.SeriesGroupBy.aggregate`, :meth:`core.window.rolling.Rolling.aggregate` (:issue:`29608`) - Removed :meth:`Series.valid`; use :meth:`Series.dropna` instead (:issue:`18800`) - Removed :attr:`DataFrame.is_copy`, :attr:`Series.is_copy` (:issue:`18812`) - Removed :meth:`DataFrame.get_ftype_counts`, :meth:`Series.get_ftype_counts` (:issue:`18243`) @@ -774,6 +735,11 @@ Categorical :class:`Categorical` with duplicate entries, the accessor was skipping duplicates (:issue:`27952`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` that would give incorrect results on categorical data (:issue:`26988`) - Bug where calling :meth:`Categorical.min` or :meth:`Categorical.max` on an empty Categorical would raise a numpy exception (:issue:`30227`) +- The following methods now also correctly output values for unobserved categories when called through ``groupby(..., observed=False)`` (:issue:`17605`) + * :meth:`core.groupby.SeriesGroupBy.count` + * :meth:`core.groupby.SeriesGroupBy.size` + * :meth:`core.groupby.SeriesGroupBy.nunique` + * :meth:`core.groupby.SeriesGroupBy.nth` Datetimelike @@ -782,14 +748,14 @@ Datetimelike - Bug in :meth:`Series.dt` property lookups when the underlying data is read-only (:issue:`27529`) - Bug in ``HDFStore.__getitem__`` incorrectly reading tz attribute created in Python 2 
(:issue:`26443`) - Bug in :func:`to_datetime` where passing arrays of malformed ``str`` with errors="coerce" could incorrectly lead to raising ``ValueError`` (:issue:`28299`) -- Bug in :meth:`pandas.core.groupby.SeriesGroupBy.nunique` where ``NaT`` values were interfering with the count of unique values (:issue:`27951`) +- Bug in :meth:`core.groupby.SeriesGroupBy.nunique` where ``NaT`` values were interfering with the count of unique values (:issue:`27951`) - Bug in :class:`Timestamp` subtraction when subtracting a :class:`Timestamp` from a ``np.datetime64`` object incorrectly raising ``TypeError`` (:issue:`28286`) - Addition and subtraction of integer or integer-dtype arrays with :class:`Timestamp` will now raise ``NullFrequencyError`` instead of ``ValueError`` (:issue:`28268`) - Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`) - Bug in :meth:`Series.astype`, :meth:`Index.astype`, and :meth:`DataFrame.astype` failing to handle ``NaT`` when casting to an integer dtype (:issue:`28492`) - Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`) - Bug in :class:`DataFrame` arithmetic operations when operating with a :class:`Series` with dtype `'timedelta64[ns]'` (:issue:`28049`) -- Bug in :func:`pandas.core.groupby.generic.SeriesGroupBy.apply` raising ``ValueError`` when a column in the original DataFrame is a datetime and the column labels are not standard integers (:issue:`28247`) +- Bug in :func:`core.groupby.generic.SeriesGroupBy.apply` raising ``ValueError`` when a column in the original DataFrame is a datetime and the column labels are not standard integers (:issue:`28247`) - Bug in :func:`pandas._config.localization.get_locales` where the ``locales -a`` encodes the locales list as windows-1252 (:issue:`23638`, :issue:`24760`, :issue:`27368`) 
- Bug in :meth:`Series.var` failing to raise ``TypeError`` when called with ``timedelta64[ns]`` dtype (:issue:`28289`) - Bug in :meth:`DatetimeIndex.strftime` and :meth:`Series.dt.strftime` where ``NaT`` was converted to the string ``'NaT'`` instead of ``np.nan`` (:issue:`29578`) @@ -922,24 +888,24 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- Bug in :meth:`DataFrame.groupby.apply` only showing output from a single group when function returns an :class:`Index` (:issue:`28652`) +- Bug in :meth:`core.groupby.DataFrameGroupBy.apply` only showing output from a single group when function returns an :class:`Index` (:issue:`28652`) - Bug in :meth:`DataFrame.groupby` with multiple groups where an ``IndexError`` would be raised if any group contained all NA values (:issue:`20519`) - Bug in :meth:`pandas.core.resample.Resampler.size` and :meth:`pandas.core.resample.Resampler.count` returning wrong dtype when used with an empty series or dataframe (:issue:`28427`) - Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue:`28192`) - Bug in :meth:`DataFrame.rolling` not allowing rolling over multi-index levels (:issue:`15584`). - Bug in :meth:`DataFrame.rolling` not allowing rolling on monotonic decreasing time indexes (:issue:`19248`). - Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`) -- Bug in :meth:`DataFrameGroupby.agg` not able to use lambda function with named aggregation (:issue:`27519`) +- Bug in :meth:`core.groupby.DataFrameGroupby.agg` not able to use lambda function with named aggregation (:issue:`27519`) - Bug in :meth:`DataFrame.groupby` losing column name information when grouping by a categorical column (:issue:`28787`) - Remove error raised due to duplicated input functions in named aggregation in :meth:`DataFrame.groupby` and :meth:`Series.groupby`. 
Previously error will be raised if the same function is applied on the same column and now it is allowed if new assigned names are different. (:issue:`28426`) -- :meth:`SeriesGroupBy.value_counts` will be able to handle the case even when the :class:`Grouper` makes empty groups (:issue: 28479) -- Bug in :meth:`DataFrameGroupBy.rolling().quantile()` ignoring ``interpolation`` keyword argument (:issue:`28779`) +- :meth:`core.groupby.SeriesGroupBy.value_counts` will be able to handle the case even when the :class:`Grouper` makes empty groups (:issue:`28479`) +- Bug in :meth:`core.window.rolling.Rolling.quantile` ignoring ``interpolation`` keyword argument when used within a groupby (:issue:`28779`) - Bug in :meth:`DataFrame.groupby` where ``any``, ``all``, ``nunique`` and transform functions would incorrectly handle duplicate column labels (:issue:`21668`) -- Bug in :meth:`DataFrameGroupBy.agg` with timezone-aware datetime64 column incorrectly casting results to the original dtype (:issue:`29641`) +- Bug in :meth:`core.groupby.DataFrameGroupBy.agg` with timezone-aware datetime64 column incorrectly casting results to the original dtype (:issue:`29641`) - Bug in :meth:`DataFrame.groupby` when using axis=1 and having a single level columns index (:issue:`30208`) - Bug in :meth:`DataFrame.groupby` when using nunique on axis=1 (:issue:`30253`) - Bug in :meth:`GroupBy.quantile` with multiple list-like q value and integer column names (:issue:`30289`) -- Bug in :meth:`GroupBy.pct_change` and :meth:`SeriesGroupBy.pct_change` causes ``TypeError`` when ``fill_method`` is ``None`` (:issue:`30463`) +- Bug in :meth:`GroupBy.pct_change` and :meth:`core.groupby.SeriesGroupBy.pct_change` causes ``TypeError`` when ``fill_method`` is ``None`` (:issue:`30463`) Reshaping ^^^^^^^^^
https://api.github.com/repos/pandas-dev/pandas/pulls/30624
2020-01-02T19:51:48Z
2020-01-02T21:42:28Z
2020-01-02T21:42:28Z
2020-01-02T21:42:31Z
REF: standardize usage with _make_wrapped_arith_op
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index f957860240dd2..306ccf176f970 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -512,39 +512,10 @@ def _convert_scalar_indexer(self, key, kind=None): return super()._convert_scalar_indexer(key, kind=kind) - @classmethod - def _add_datetimelike_methods(cls): - """ - Add in the datetimelike methods (as we may have to override the - superclass). - """ - - def __add__(self, other): - # dispatch to ExtensionArray implementation - result = self._data.__add__(maybe_unwrap_index(other)) - return wrap_arithmetic_op(self, other, result) - - cls.__add__ = __add__ - - def __radd__(self, other): - # alias for __add__ - return self.__add__(other) - - cls.__radd__ = __radd__ - - def __sub__(self, other): - # dispatch to ExtensionArray implementation - result = self._data.__sub__(maybe_unwrap_index(other)) - return wrap_arithmetic_op(self, other, result) - - cls.__sub__ = __sub__ - - def __rsub__(self, other): - result = self._data.__rsub__(maybe_unwrap_index(other)) - return wrap_arithmetic_op(self, other, result) - - cls.__rsub__ = __rsub__ - + __add__ = _make_wrapped_arith_op("__add__") + __radd__ = _make_wrapped_arith_op("__radd__") + __sub__ = _make_wrapped_arith_op("__sub__") + __rsub__ = _make_wrapped_arith_op("__rsub__") __pow__ = _make_wrapped_arith_op("__pow__") __rpow__ = _make_wrapped_arith_op("__rpow__") __mul__ = _make_wrapped_arith_op("__mul__") diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f6f46d7e66c69..698576a90bb7e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1165,7 +1165,6 @@ def indexer_between_time( DatetimeIndex._add_comparison_ops() DatetimeIndex._add_numeric_methods_disabled() DatetimeIndex._add_logical_methods_disabled() -DatetimeIndex._add_datetimelike_methods() def date_range( diff --git a/pandas/core/indexes/period.py 
b/pandas/core/indexes/period.py index 022e3ba674a27..9b02a27a2013b 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -845,7 +845,6 @@ def memory_usage(self, deep=False): PeriodIndex._add_comparison_ops() PeriodIndex._add_numeric_methods_disabled() PeriodIndex._add_logical_methods_disabled() -PeriodIndex._add_datetimelike_methods() def period_range( diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 795b4836b9a2a..eba4726755234 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -507,7 +507,6 @@ def delete(self, loc): TimedeltaIndex._add_comparison_ops() TimedeltaIndex._add_logical_methods_disabled() -TimedeltaIndex._add_datetimelike_methods() def timedelta_range(
https://api.github.com/repos/pandas-dev/pandas/pulls/30623
2020-01-02T19:39:09Z
2020-01-03T00:51:34Z
2020-01-03T00:51:34Z
2020-01-03T00:53:10Z
DOC: Fixing PR09 formatting errors
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 5f4bd801429a4..704914fb964fb 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -232,7 +232,7 @@ class Categorical(ExtensionArray, PandasObject): `categories` attribute (which in turn is the `categories` argument, if provided). dtype : CategoricalDtype - An instance of ``CategoricalDtype`` to use for this categorical + An instance of ``CategoricalDtype`` to use for this categorical. .. versionadded:: 0.21.0 diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index eb762a23d684d..aeb953031ae89 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -230,12 +230,12 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps The datetime data. For DatetimeArray `values` (or a Series or Index boxing one), - `dtype` and `freq` will be extracted from `values`, with - precedence given to + `dtype` and `freq` will be extracted from `values`. dtype : numpy.dtype or DatetimeTZDtype Note that the only NumPy dtype allowed is 'datetime64[ns]'. freq : str or Offset, optional + The frequency. copy : bool, default False Whether to copy the underlying array of values. diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 1eeb9ddc8e064..854d9067f2f2a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -440,8 +440,9 @@ def to_timestamp(self, freq=None, how="start"): ---------- freq : str or DateOffset, optional Target frequency. The default is 'D' for week or longer, - 'S' otherwise + 'S' otherwise. how : {'s', 'e', 'start', 'end'} + Whether to use the start or end of the time period being converted. Returns ------- @@ -528,17 +529,20 @@ def asfreq(self, freq=None, how="E"): Parameters ---------- freq : str - a frequency + A frequency. 
how : str {'E', 'S'} - 'E', 'END', or 'FINISH' for end, - 'S', 'START', or 'BEGIN' for start. Whether the elements should be aligned to the end - or start within pa period. January 31st ('END') vs. - January 1st ('START') for example. + or start within pa period. + + * 'E', 'END', or 'FINISH' for end, + * 'S', 'START', or 'BEGIN' for start. + + January 31st ('END') vs. January 1st ('START') for example. Returns ------- - new : Period Array/Index with the new frequency + Period Array/Index + Constructed with the new frequency. Examples -------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b69199defbcc4..f58cce8693e15 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -142,11 +142,12 @@ Name or list of names to sort by. - if `axis` is 0 or `'index'` then `by` may contain index - levels and/or column labels + levels and/or column labels. - if `axis` is 1 or `'columns'` then `by` may contain column - levels and/or index labels + levels and/or index labels. .. versionchanged:: 0.23.0 + Allow specifying index or column level names.""", versionadded_to_excel="", optional_labels="""labels : array-like, optional @@ -2148,9 +2149,10 @@ def to_html( A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. encoding : str, default "utf-8" - Set character encoding + Set character encoding. .. versionadded:: 1.0 + table_id : str, optional A css id is included in the opening `<table>` tag if specified. @@ -7877,7 +7879,7 @@ def idxmin(self, axis=0, skipna=True): Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. 
@@ -7915,7 +7917,7 @@ def idxmax(self, axis=0, skipna=True): Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 81a9145318cb5..5f543181cfb4e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2099,17 +2099,17 @@ def rank( Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' - * average: average rank of group - * min: lowest rank in group - * max: highest rank in group - * first: ranks assigned in order they appear in the array - * dense: like 'min', but rank always increases by 1 between groups + * average: average rank of group. + * min: lowest rank in group. + * max: highest rank in group. + * first: ranks assigned in order they appear in the array. + * dense: like 'min', but rank always increases by 1 between groups. ascending : bool, default True False for ranks by high (1) to low (N). na_option : {'keep', 'top', 'bottom'}, default 'keep' - * keep: leave NA values where they are - * top: smallest rank if ascending - * bottom: smallest rank if descending + * keep: leave NA values where they are. + * top: smallest rank if ascending. + * bottom: smallest rank if descending. pct : bool, default False Compute percentage rank of data within each group. axis : int, default 0 diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 747a32ae816be..05a5458f60cf5 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -34,8 +34,7 @@ class Grouper: """ - A Grouper allows the user to specify a groupby instruction for a target - object. 
+ A Grouper allows the user to specify a groupby instruction for an object. This specification will select a column via the key parameter, or if the level and/or axis parameters are given, a level of the index of the target @@ -47,17 +46,18 @@ class Grouper: Parameters ---------- key : str, defaults to None - groupby key, which selects the grouping column of the target + Groupby key, which selects the grouping column of the target. level : name/number, defaults to None - the level for the target index + The level for the target index. freq : str / frequency object, defaults to None This will groupby the specified frequency if the target selection (via key or level) is a datetime-like object. For full specification of available frequencies, please see `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_. - axis : number/name of the axis, defaults to 0 + axis : str, int, defaults to 0 + Number/name of the axis. sort : bool, default to False - whether to sort the resulting labels + Whether to sort the resulting labels. closed : {'left' or 'right'} Closed end of interval. Only when `freq` parameter is passed. label : {'left' or 'right'} diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 022e3ba674a27..7d96f1611c9ba 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -79,8 +79,7 @@ class PeriodDelegateMixin(DatetimelikeDelegateMixin): ) class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): """ - Immutable ndarray holding ordinal values indicating regular periods in - time such as particular years, quarters, months, etc. + Immutable ndarray holding ordinal values indicating regular periods in time. Index keys are boxed to Period objects which carries the metadata (eg, frequency information). 
@@ -88,9 +87,9 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): Parameters ---------- data : array-like (1d int np.ndarray or PeriodArray), optional - Optional period-like data to construct index with + Optional period-like data to construct index with. copy : bool - Make a copy of input ndarray + Make a copy of input ndarray. freq : str or period object, optional One of pandas period strings or corresponding objects year : int, array, or Series, default None @@ -101,7 +100,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): minute : int, array, or Series, default None second : int, array, or Series, default None tz : object, default None - Timezone for converting datetime64 data to Periods + Timezone for converting datetime64 data to Periods. dtype : str or PeriodDtype, default None Attributes diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index baecba7e78384..441a8756f09e0 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -314,7 +314,7 @@ def cov(self, other=None, pairwise=None, bias=False, **kwargs): inputs. In the case of missing elements, only complete pairwise observations will be used. bias : bool, default False - Use a standard estimation bias correction + Use a standard estimation bias correction. **kwargs Keyword arguments to be passed into func. """ diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index fe13fce83161d..dfcd4b9a606cd 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -526,8 +526,10 @@ def parse( class ExcelWriter(metaclass=abc.ABCMeta): """ - Class for writing DataFrame objects into excel sheets, default is to use - xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage. + Class for writing DataFrame objects into excel sheets. + + Default is to use xlwt for xls, openpyxl for xlsx. + See DataFrame.to_excel for typical usage. 
Parameters ---------- @@ -541,7 +543,7 @@ class ExcelWriter(metaclass=abc.ABCMeta): Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). datetime_format : str, default None Format string for datetime objects written into Excel files. - (e.g. 'YYYY-MM-DD HH:MM:SS') + (e.g. 'YYYY-MM-DD HH:MM:SS'). mode : {'w', 'a'}, default 'w' File mode to use (write or append).
- part of #28602 - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` just the good stuff from #29530
https://api.github.com/repos/pandas-dev/pandas/pulls/30622
2020-01-02T18:46:30Z
2020-01-03T00:52:21Z
2020-01-03T00:52:20Z
2020-01-06T16:46:55Z
DEPR: pandas.util.testing
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 43b1b31a0bfe8..eb903e28ff719 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -3,7 +3,7 @@ import numpy as np import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm try: from pandas.api.types import union_categoricals diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index a9e45cad22d27..a1ae83373528b 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DatetimeIndex, Index, MultiIndex, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm def no_change(arr): diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index 1deca8fe3aad0..fdcdcc06bef4d 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm try: from pandas.tseries.offsets import Nano, Hour diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index ae6c07107f4a0..33c415d91de60 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -4,7 +4,7 @@ import numpy as np from pandas import DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range -import pandas.util.testing as tm +import pandas._testing as tm class GetNumericData: diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 860c6cc6192bb..6efae0b7222ad 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -1,8 +1,8 @@ import numpy as np from pandas import DataFrame, Series, date_range, factorize, read_csv +import pandas._testing as tm from pandas.core.algorithms import take_1d -import 
pandas.util.testing as tm try: from pandas import ( @@ -24,7 +24,7 @@ except ImportError: from pandas import algos try: - from pandas.util.testing import test_parallel + from pandas._testing import test_parallel have_real_test_parallel = True except ImportError: diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index d51c53e2264f1..f526369b634b4 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -13,7 +13,7 @@ date_range, period_range, ) -import pandas.util.testing as tm +import pandas._testing as tm method_blacklist = { "object": { diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index d69799eb70040..0ba0ac764172b 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -12,7 +12,7 @@ Series, date_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class SetOperations: diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 6453649b91270..8cec418ff8a41 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -17,7 +17,7 @@ option_context, period_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class NumericSeriesIndexing: diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py index e85b3bd2c7687..9e33f91b4d548 100644 --- a/asv_bench/benchmarks/inference.py +++ b/asv_bench/benchmarks/inference.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, Series, to_numeric -import pandas.util.testing as tm +import pandas._testing as tm from .pandas_vb_common import lib, numeric_dtypes diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py index b8e8630e663ee..d25ad1026f91c 100644 --- a/asv_bench/benchmarks/io/csv.py +++ b/asv_bench/benchmarks/io/csv.py @@ -5,7 +5,7 @@ import numpy as np from pandas import Categorical, DataFrame, date_range, read_csv, to_datetime 
-import pandas.util.testing as tm +import pandas._testing as tm from ..pandas_vb_common import BaseIO diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py index 75d87140488e3..d3ab8a12c1e2b 100644 --- a/asv_bench/benchmarks/io/excel.py +++ b/asv_bench/benchmarks/io/excel.py @@ -6,7 +6,7 @@ from odf.text import P from pandas import DataFrame, ExcelWriter, date_range, read_excel -import pandas.util.testing as tm +import pandas._testing as tm def _generate_dataframe(): diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py index 88c1a3dc48ea4..1bb8411d005cf 100644 --- a/asv_bench/benchmarks/io/hdf.py +++ b/asv_bench/benchmarks/io/hdf.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, HDFStore, date_range, read_hdf -import pandas.util.testing as tm +import pandas._testing as tm from ..pandas_vb_common import BaseIO diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index 27096bcaba78b..d758693b4b0a9 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, concat, date_range, read_json, timedelta_range -import pandas.util.testing as tm +import pandas._testing as tm from ..pandas_vb_common import BaseIO diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py index 12620656dd2bf..158a9386696e6 100644 --- a/asv_bench/benchmarks/io/pickle.py +++ b/asv_bench/benchmarks/io/pickle.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, date_range, read_pickle -import pandas.util.testing as tm +import pandas._testing as tm from ..pandas_vb_common import BaseIO diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py index 6cc7f56ae3d65..a5f4d54710dde 100644 --- a/asv_bench/benchmarks/io/sql.py +++ b/asv_bench/benchmarks/io/sql.py @@ -4,7 +4,7 @@ from sqlalchemy import create_engine from pandas import DataFrame, date_range, 
read_sql_query, read_sql_table -import pandas.util.testing as tm +import pandas._testing as tm class SQL: diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py index f3125f8598418..70a773f994d8a 100644 --- a/asv_bench/benchmarks/io/stata.py +++ b/asv_bench/benchmarks/io/stata.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, date_range, read_stata -import pandas.util.testing as tm +import pandas._testing as tm from ..pandas_vb_common import BaseIO diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 5cf9f6336ba0c..77e5afffc6260 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -3,7 +3,7 @@ import numpy as np from pandas import DataFrame, MultiIndex, Series, concat, date_range, merge, merge_asof -import pandas.util.testing as tm +import pandas._testing as tm try: from pandas import merge_ordered diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py index 5a396c9f0deff..1d2412975046b 100644 --- a/asv_bench/benchmarks/multiindex_object.py +++ b/asv_bench/benchmarks/multiindex_object.py @@ -3,7 +3,7 @@ import numpy as np from pandas import DataFrame, MultiIndex, RangeIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm class GetLoc: diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py index cd450f801c805..d5bfaa1fb0314 100644 --- a/asv_bench/benchmarks/reindex.py +++ b/asv_bench/benchmarks/reindex.py @@ -1,7 +1,7 @@ import numpy as np from pandas import DataFrame, Index, MultiIndex, Series, date_range, period_range -import pandas.util.testing as tm +import pandas._testing as tm from .pandas_vb_common import lib diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index a3f1d92545c3f..e335963aa2d7c 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -3,7 +3,7 @@ 
import numpy as np from pandas import NaT, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class SeriesConstructor: diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index f30b2482615bd..aca18553f1086 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -3,7 +3,7 @@ import numpy as np from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class Methods: diff --git a/ci/code_checks.sh b/ci/code_checks.sh index dc1c8481b1712..a90774d2e8ff1 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -139,8 +139,8 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then RET=$(($RET + $?)) ; echo $MSG "DONE" # Checks for test suite - # Check for imports from pandas.util.testing instead of `import pandas.util.testing as tm` - invgrep -R --include="*.py*" -E "from pandas.util.testing import" pandas/tests + # Check for imports from pandas._testing instead of `import pandas._testing as tm` + invgrep -R --include="*.py*" -E "from pandas._testing import" pandas/tests RET=$(($RET + $?)) ; echo $MSG "DONE" invgrep -R --include="*.py*" -E "from pandas.util import testing as tm" pandas/tests RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index d7b3e159f8ce7..253dd5160a7ff 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -957,7 +957,7 @@ inspiration. If your test requires working with files or network connectivity, there is more information on the `testing page <https://github.com/pandas-dev/pandas/wiki/Testing>`_ of the wiki. -The ``pandas.util.testing`` module has many special ``assert`` functions that +The ``pandas._testing`` module has many special ``assert`` functions that make it easier to make statements about whether Series or DataFrame objects are equivalent. 
The easiest way to verify that your code is correct is to explicitly construct the result you expect, then compare the actual result to @@ -1143,7 +1143,7 @@ If your change involves checking that a warning is actually emitted, use .. code-block:: python - import pandas.util.testing as tm + import pandas._testing as tm df = pd.DataFrame() diff --git a/doc/source/reference/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst index 0961acc43f301..e2e47d9f87960 100644 --- a/doc/source/reference/general_utility_functions.rst +++ b/doc/source/reference/general_utility_functions.rst @@ -18,6 +18,8 @@ Working with options set_option option_context +.. _api.general.testing: + Testing functions ----------------- .. autosummary:: @@ -26,6 +28,16 @@ Testing functions testing.assert_frame_equal testing.assert_series_equal testing.assert_index_equal + testing.assert_equal + testing.assert_almost_equal + testing.assert_categorical_equal + testing.assert_datetime_array_equal + testing.assert_extension_array_equal + testing.assert_interval_array_equal + testing.assert_numpy_array_equal + testing.assert_period_array_equal + testing.assert_sp_array_equal + testing.assert_timedelta_array_equal Exceptions and warnings ----------------------- diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index c32b009948fda..f9751dae87deb 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -1519,7 +1519,7 @@ rows will skip the intervening rows. .. 
ipython:: python - from pandas.util.testing import makeCustomDataframe as mkdf + from pandas._testing import makeCustomDataframe as mkdf df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4) df.to_csv('mi.csv') print(open('mi.csv').read()) diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index 8583a9312b690..b28354cd8b5f2 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -14,7 +14,7 @@ Reshaping by pivoting DataFrame objects .. ipython:: python :suppress: - import pandas.util.testing as tm + import pandas._testing as tm tm.N = 3 def unpivot(frame): @@ -38,7 +38,7 @@ For the curious here is how the above ``DataFrame`` was created: .. code-block:: python - import pandas.util.testing as tm + import pandas._testing as tm tm.N = 3 diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst index 0611c6334937f..43bb4966ec5bf 100644 --- a/doc/source/user_guide/scale.rst +++ b/doc/source/user_guide/scale.rst @@ -26,7 +26,7 @@ Assuming you want or need the expressiveness and power of pandas, let's carry on .. ipython:: python :suppress: - from pandas.util.testing import _make_timeseries + from pandas._testing import _make_timeseries # Make a random in-memory dataset ts = _make_timeseries(freq="30S", seed=0) diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst index 86ff338536f80..823e177f3e05e 100644 --- a/doc/source/whatsnew/v0.12.0.rst +++ b/doc/source/whatsnew/v0.12.0.rst @@ -236,7 +236,7 @@ I/O enhancements .. 
ipython:: python - from pandas.util.testing import makeCustomDataframe as mkdf + from pandas._testing import makeCustomDataframe as mkdf df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4) df.to_csv('mi.csv') print(open('mi.csv').read()) diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst index 6242c40d44bf8..4f9ab761334e7 100644 --- a/doc/source/whatsnew/v0.13.1.rst +++ b/doc/source/whatsnew/v0.13.1.rst @@ -224,7 +224,7 @@ Enhancements .. code-block:: ipython - In [28]: import pandas.util.testing as tm + In [28]: import pandas._testing as tm In [29]: panel = tm.makePanel(5) diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst index a7174c6325f86..d3f96d4185d65 100644 --- a/doc/source/whatsnew/v0.18.0.rst +++ b/doc/source/whatsnew/v0.18.0.rst @@ -1279,7 +1279,7 @@ Bug Fixes - Removed ``millisecond`` property of ``DatetimeIndex``. This would always raise a ``ValueError`` (:issue:`12019`). - Bug in ``Series`` constructor with read-only data (:issue:`11502`) -- Removed ``pandas.util.testing.choice()``. Should use ``np.random.choice()``, instead. (:issue:`12386`) +- Removed ``pandas._testing.choice()``. Should use ``np.random.choice()``, instead. (:issue:`12386`) - Bug in ``.loc`` setitem indexer preventing the use of a TZ-aware DatetimeIndex (:issue:`12050`) - Bug in ``.style`` indexes and MultiIndexes not appearing (:issue:`11655`) - Bug in ``to_msgpack`` and ``from_msgpack`` which did not correctly serialize or deserialize ``NaT`` (:issue:`12307`). diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index e7dc6150ffcb1..ceb1c7f27231b 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -1360,7 +1360,7 @@ provides a :meth:`~Panel.to_xarray` method to automate this conversion (:issue:` .. 
code-block:: ipython - In [133]: import pandas.util.testing as tm + In [133]: import pandas._testing as tm In [134]: p = tm.makePanel() diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst index f33943e423b25..2a160eed9f8fd 100644 --- a/doc/source/whatsnew/v0.21.0.rst +++ b/doc/source/whatsnew/v0.21.0.rst @@ -927,7 +927,7 @@ Other API changes - :class:`pandas.HDFStore`'s string representation is now faster and less detailed. For the previous behavior, use ``pandas.HDFStore.info()``. (:issue:`16503`). - Compression defaults in HDF stores now follow pytables standards. Default is no compression and if ``complib`` is missing and ``complevel`` > 0 ``zlib`` is used (:issue:`15943`) - ``Index.get_indexer_non_unique()`` now returns a ndarray indexer rather than an ``Index``; this is consistent with ``Index.get_indexer()`` (:issue:`16819`) -- Removed the ``@slow`` decorator from ``pandas.util.testing``, which caused issues for some downstream packages' test suites. Use ``@pytest.mark.slow`` instead, which achieves the same thing (:issue:`16850`) +- Removed the ``@slow`` decorator from ``pandas._testing``, which caused issues for some downstream packages' test suites. Use ``@pytest.mark.slow`` instead, which achieves the same thing (:issue:`16850`) - Moved definition of ``MergeError`` to the ``pandas.errors`` module. - The signature of :func:`Series.set_axis` and :func:`DataFrame.set_axis` has been changed from ``set_axis(axis, labels)`` to ``set_axis(labels, axis=0)``, for consistency with the rest of the API. 
The old signature is deprecated and will show a ``FutureWarning`` (:issue:`14636`) - :func:`Series.argmin` and :func:`Series.argmax` will now raise a ``TypeError`` when used with ``object`` dtypes, instead of a ``ValueError`` (:issue:`13595`) diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst index f4c283ea742f7..b9e1b5060d1da 100644 --- a/doc/source/whatsnew/v0.23.0.rst +++ b/doc/source/whatsnew/v0.23.0.rst @@ -648,7 +648,7 @@ provides a :meth:`~Panel.to_xarray` method to automate this conversion (:issue:` .. code-block:: ipython - In [75]: import pandas.util.testing as tm + In [75]: import pandas._testing as tm In [76]: p = tm.makePanel() diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a5ea60d0a0d19..b9cc1dad53674 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -577,6 +577,7 @@ Deprecations it is recommended to use ``json_normalize`` as :func:`pandas.json_normalize` instead (:issue:`27586`). - :meth:`DataFrame.to_stata`, :meth:`DataFrame.to_feather`, and :meth:`DataFrame.to_parquet` argument "fname" is deprecated, use "path" instead (:issue:`23574`) - The deprecated internal attributes ``_start``, ``_stop`` and ``_step`` of :class:`RangeIndex` now raise a ``FutureWarning`` instead of a ``DeprecationWarning`` (:issue:`26581`) +- The ``pandas.util.testing`` module has been deprecated. Use the public API in ``pandas.testing`` documented at :ref:`api.general.testing` (:issue:`16232`). **Selecting Columns from a Grouped DataFrame** @@ -703,7 +704,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. 
- Ability to read pickles containing :class:`Categorical` instances created with pre-0.16 version of pandas has been removed (:issue:`27538`) - Removed :func:`pandas.tseries.plotting.tsplot` (:issue:`18627`) - Removed the previously deprecated keywords "reduce" and "broadcast" from :meth:`DataFrame.apply` (:issue:`18577`) -- Removed the previously deprecated ``assert_raises_regex`` function in ``pandas.util.testing`` (:issue:`29174`) +- Removed the previously deprecated ``assert_raises_regex`` function in ``pandas._testing`` (:issue:`29174`) - Removed the previously deprecated ``FrozenNDArray`` class in ``pandas.core.indexes.frozen`` (:issue:`29335`) - Removed the previously deprecated keyword "nthreads" from :func:`read_feather`, use "use_threads" instead (:issue:`23053`) - Removed :meth:`Index.is_lexsorted_for_tuple` (:issue:`29305`) diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx index 026bd7a44a509..5a30b71a6fea1 100644 --- a/pandas/_libs/testing.pyx +++ b/pandas/_libs/testing.pyx @@ -123,7 +123,7 @@ cpdef assert_almost_equal(a, b, if isiterable(a): if not isiterable(b): - from pandas.util.testing import assert_class_equal + from pandas._testing import assert_class_equal # classes can't be the same, to raise error assert_class_equal(a, b, obj=obj) @@ -134,12 +134,12 @@ cpdef assert_almost_equal(a, b, if a_is_ndarray and b_is_ndarray: na, nb = a.size, b.size if a.shape != b.shape: - from pandas.util.testing import raise_assert_detail + from pandas._testing import raise_assert_detail raise_assert_detail( obj, f'{obj} shapes are different', a.shape, b.shape) if check_dtype and not is_dtype_equal(a.dtype, b.dtype): - from pandas.util.testing import assert_attr_equal + from pandas._testing import assert_attr_equal assert_attr_equal('dtype', a, b, obj=obj) if array_equivalent(a, b, strict_nan=True): @@ -149,7 +149,7 @@ cpdef assert_almost_equal(a, b, na, nb = len(a), len(b) if na != nb: - from pandas.util.testing import raise_assert_detail + from 
pandas._testing import raise_assert_detail # if we have a small diff set, print it if abs(na - nb) < 10: @@ -168,7 +168,7 @@ cpdef assert_almost_equal(a, b, diff += 1 if is_unequal: - from pandas.util.testing import raise_assert_detail + from pandas._testing import raise_assert_detail msg = (f"{obj} values are different " f"({np.round(diff * 100.0 / na, 5)} %)") raise_assert_detail(obj, msg, lobj, robj) @@ -176,7 +176,7 @@ cpdef assert_almost_equal(a, b, return True elif isiterable(b): - from pandas.util.testing import assert_class_equal + from pandas._testing import assert_class_equal # classes can't be the same, to raise error assert_class_equal(a, b, obj=obj) diff --git a/pandas/util/testing.py b/pandas/_testing.py similarity index 99% rename from pandas/util/testing.py rename to pandas/_testing.py index 2e201339d4d77..2ebebc5d5e10a 100644 --- a/pandas/util/testing.py +++ b/pandas/_testing.py @@ -921,23 +921,24 @@ def assert_numpy_array_equal( check_same=None, obj="numpy array", ): - """ Checks that 'np.ndarray' is equivalent + """ + Check that 'np.ndarray' is equivalent. Parameters ---------- - left : np.ndarray or iterable - right : np.ndarray or iterable + left, right : numpy.ndarray or iterable + The two arrays to be compared. strict_nan : bool, default False If True, consider NaN and None to be different. - check_dtype: bool, default True - check dtype if both a and b are np.ndarray + check_dtype : bool, default True + Check dtype if both a and b are np.ndarray. err_msg : str, default None - If provided, used as assertion message + If provided, used as assertion message. check_same : None|'copy'|'same', default None - Ensure left and right refer/do not refer to the same memory area + Ensure left and right refer/do not refer to the same memory area. obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate - assertion message + assertion message. 
""" __tracebackhide__ = True @@ -1273,7 +1274,7 @@ def assert_frame_equal( This example shows comparing two DataFrames that are equal but with columns of differing dtypes. - >>> from pandas.util.testing import assert_frame_equal + >>> from pandas._testing import assert_frame_equal >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}) @@ -1375,9 +1376,10 @@ def assert_equal(left, right, **kwargs): Parameters ---------- - left : Index, Series, DataFrame, ExtensionArray, or np.ndarray - right : Index, Series, DataFrame, ExtensionArray, or np.ndarray + left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray + The two items to be compared. **kwargs + All keyword arguments are passed through to the underlying assert method. """ __tracebackhide__ = True @@ -1401,7 +1403,7 @@ def assert_equal(left, right, **kwargs): assert_numpy_array_equal(left, right, **kwargs) elif isinstance(left, str): assert kwargs == {} - return left == right + assert left == right else: raise NotImplementedError(type(left)) @@ -2281,7 +2283,7 @@ def network( Tests decorated with @network will fail if it's possible to make a network connection to another URL (defaults to google.com):: - >>> from pandas.util.testing import network + >>> from pandas._testing import network >>> from pandas.io.common import urlopen >>> @network ... def test_network(): diff --git a/pandas/conftest.py b/pandas/conftest.py index eb7263fe116cc..3eab2186ccb94 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -15,8 +15,8 @@ import pandas as pd from pandas import DataFrame +import pandas._testing as tm from pandas.core import ops -import pandas.util.testing as tm hypothesis.settings.register_profile( "ci", diff --git a/pandas/testing.py b/pandas/testing.py index acae47367d997..26a60d80854b8 100644 --- a/pandas/testing.py +++ b/pandas/testing.py @@ -1,11 +1,35 @@ -# flake8: noqa - """ Public testing utility functions. 
""" -from pandas.util.testing import ( +from pandas._testing import ( + assert_almost_equal, + assert_categorical_equal, + assert_datetime_array_equal, + assert_equal, + assert_extension_array_equal, assert_frame_equal, assert_index_equal, + assert_interval_array_equal, + assert_numpy_array_equal, + assert_period_array_equal, assert_series_equal, + assert_sp_array_equal, + assert_timedelta_array_equal, ) + +__all__ = [ + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", + "assert_equal", + "assert_almost_equal", + "assert_categorical_equal", + "assert_datetime_array_equal", + "assert_extension_array_equal", + "assert_interval_array_equal", + "assert_numpy_array_equal", + "assert_period_array_equal", + "assert_sp_array_equal", + "assert_timedelta_array_equal", +] diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index d865f26983579..82bf0c0fff9c0 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import api, compat -import pandas.util.testing as tm +import pandas._testing as tm class Base: @@ -193,6 +193,7 @@ class TestPDApi(Base): "_np_version_under1p16", "_np_version_under1p17", "_np_version_under1p18", + "_testing", "_tslib", "_typing", "_version", @@ -266,9 +267,36 @@ def test_api(self): class TestTesting(Base): - funcs = ["assert_frame_equal", "assert_series_equal", "assert_index_equal"] + funcs = [ + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", + "assert_equal", + "assert_almost_equal", + "assert_categorical_equal", + "assert_datetime_array_equal", + "assert_extension_array_equal", + "assert_interval_array_equal", + "assert_numpy_array_equal", + "assert_period_array_equal", + "assert_sp_array_equal", + "assert_timedelta_array_equal", + ] def test_testing(self): from pandas import testing self.check(testing, self.funcs) + + def test_util_testing_deprecated(self): + s = pd.Series([], dtype="object") + with 
tm.assert_produces_warning(FutureWarning) as m: + import pandas.util.testing as tm2 + + tm2.assert_series_equal(s, s) + + assert "pandas.testing.assert_series_equal" in str(m[0].message) + + with tm.assert_produces_warning(FutureWarning) as m: + tm2.DataFrame + assert "removed" in str(m[0].message) diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index 97480502f192c..31423c03dee34 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -1,5 +1,5 @@ +import pandas._testing as tm from pandas.api import types -import pandas.util.testing as tm from .test_api import Base diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index bc02a1e76a695..7c3ceb3dba2b6 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -5,7 +5,7 @@ import pytest from pandas import DataFrame, Index, Series -import pandas.util.testing as tm +import pandas._testing as tm def assert_invalid_addsub_type(left, right, msg=None): diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index 64588af3e3053..577093c0f2967 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm # ------------------------------------------------------------------ # Helper Functions diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index ec25f022f5a9e..20ea8d31ebbe2 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -26,6 +26,7 @@ Timestamp, date_range, ) +import pandas._testing as tm from pandas.core.indexes.datetimes import _to_M8 from pandas.core.ops import roperator from pandas.tests.arithmetic.common import ( @@ -33,7 +34,6 @@ assert_invalid_comparison, get_upcast_box, ) -import pandas.util.testing as tm # 
------------------------------------------------------------------ # Comparisons diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 9733d589ee93b..b2826ab139ed6 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -12,8 +12,8 @@ import pandas as pd from pandas import Index, Series, Timedelta, TimedeltaIndex +import pandas._testing as tm from pandas.core import ops -import pandas.util.testing as tm def adjust_negative_zero(zero, expected): diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index f9c1de115b3a4..799ef3492e53f 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -9,8 +9,8 @@ import pandas as pd from pandas import Series, Timestamp +import pandas._testing as tm from pandas.core import ops -import pandas.util.testing as tm # ------------------------------------------------------------------ # Comparisons diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 3ad7a6d8e465c..b89a2d99e1d80 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -11,9 +11,9 @@ import pandas as pd from pandas import Period, PeriodIndex, Series, period_range +import pandas._testing as tm from pandas.core import ops from pandas.core.arrays import TimedeltaArray -import pandas.util.testing as tm from pandas.tseries.frequencies import to_offset diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index d61adf5ef2e7b..9b0d3712e9bea 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -18,12 +18,12 @@ Timestamp, timedelta_range, ) +import pandas._testing as tm from pandas.tests.arithmetic.common import ( assert_invalid_addsub_type, assert_invalid_comparison, get_upcast_box, ) -import pandas.util.testing as 
tm # ------------------------------------------------------------------ # Timedelta64[ns] dtype Comparisons diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py index 870a0a5db175e..c55a19bf04987 100644 --- a/pandas/tests/arrays/categorical/test_algos.py +++ b/pandas/tests/arrays/categorical/test_algos.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("ordered", [True, False]) diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py index 4122a64a64516..90fcf12093909 100644 --- a/pandas/tests/arrays/categorical/test_analytics.py +++ b/pandas/tests/arrays/categorical/test_analytics.py @@ -6,8 +6,8 @@ from pandas.compat import PYPY from pandas import Categorical, Index, NaT, Series, date_range +import pandas._testing as tm from pandas.api.types import is_scalar -import pandas.util.testing as tm class TestCategoricalAnalytics: diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index 82f2fe1ab8fb6..df6623acfefee 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -4,9 +4,9 @@ import pytest from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series +import pandas._testing as tm from pandas.core.arrays.categorical import _recode_for_categories from pandas.tests.arrays.categorical.common import TestCategorical -import pandas.util.testing as tm class TestCategoricalAPI: diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index 6c8b654c1955c..70a23e9748dd1 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -24,7 +24,7 @@ period_range, timedelta_range, ) -import pandas.util.testing as tm +import 
pandas._testing as tm class TestCategoricalConstructors: diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py index 85bf385b029a3..19746d7d72162 100644 --- a/pandas/tests/arrays/categorical/test_dtypes.py +++ b/pandas/tests/arrays/categorical/test_dtypes.py @@ -4,7 +4,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm class TestCategoricalDtypes: diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py index 37dea53f792cb..8ff6a4709c0d7 100644 --- a/pandas/tests/arrays/categorical/test_indexing.py +++ b/pandas/tests/arrays/categorical/test_indexing.py @@ -3,9 +3,9 @@ import pandas as pd from pandas import Categorical, CategoricalIndex, Index, PeriodIndex, Series +import pandas._testing as tm import pandas.core.common as com from pandas.tests.arrays.categorical.common import TestCategorical -import pandas.util.testing as tm class TestCategoricalIndexingWithFactor(TestCategorical): diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py index 3037ac79cd592..211bf091ee17d 100644 --- a/pandas/tests/arrays/categorical/test_missing.py +++ b/pandas/tests/arrays/categorical/test_missing.py @@ -6,7 +6,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import Categorical, Index, Series, isna -import pandas.util.testing as tm +import pandas._testing as tm class TestCategoricalMissing: diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index 10e33bf70dc66..f58524a86b4aa 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -6,8 +6,8 @@ import pandas as pd from pandas import Categorical, DataFrame, Series, date_range 
+import pandas._testing as tm from pandas.tests.arrays.categorical.common import TestCategorical -import pandas.util.testing as tm class TestCategoricalOpsWithFactor(TestCategorical): diff --git a/pandas/tests/arrays/categorical/test_sorting.py b/pandas/tests/arrays/categorical/test_sorting.py index a0b09e19ece6e..2a0ef043bf9a9 100644 --- a/pandas/tests/arrays/categorical/test_sorting.py +++ b/pandas/tests/arrays/categorical/test_sorting.py @@ -2,7 +2,7 @@ import pytest from pandas import Categorical, Index -import pandas.util.testing as tm +import pandas._testing as tm class TestCategoricalSort: diff --git a/pandas/tests/arrays/categorical/test_subclass.py b/pandas/tests/arrays/categorical/test_subclass.py index cfc7b8541302f..b80d0ff41aba6 100644 --- a/pandas/tests/arrays/categorical/test_subclass.py +++ b/pandas/tests/arrays/categorical/test_subclass.py @@ -1,5 +1,5 @@ from pandas import Categorical -import pandas.util.testing as tm +import pandas._testing as tm class TestCategoricalSubclassing: diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py index 29bd5252dbe3a..1ee877cbbf348 100644 --- a/pandas/tests/arrays/categorical/test_warnings.py +++ b/pandas/tests/arrays/categorical/test_warnings.py @@ -1,6 +1,6 @@ import pytest -import pandas.util.testing as tm +import pandas._testing as tm class TestCategoricalWarnings: diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index 655a6e717119b..82db14d9eb135 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -11,8 +11,8 @@ date_range, timedelta_range, ) +import pandas._testing as tm from pandas.core.arrays import IntervalArray -import pandas.util.testing as tm @pytest.fixture( diff --git a/pandas/tests/arrays/interval/test_ops.py b/pandas/tests/arrays/interval/test_ops.py index a55c33c2f22e9..b4de80dc00a4e 100644 --- 
a/pandas/tests/arrays/interval/test_ops.py +++ b/pandas/tests/arrays/interval/test_ops.py @@ -3,8 +3,8 @@ import pytest from pandas import Interval, IntervalIndex, Timedelta, Timestamp +import pandas._testing as tm from pandas.core.arrays import IntervalArray -import pandas.util.testing as tm @pytest.fixture(params=[IntervalArray, IntervalIndex]) diff --git a/pandas/tests/arrays/sparse/test_accessor.py b/pandas/tests/arrays/sparse/test_accessor.py index eab174862818c..e40535697cf1b 100644 --- a/pandas/tests/arrays/sparse/test_accessor.py +++ b/pandas/tests/arrays/sparse/test_accessor.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesAccessor: diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index f1d2803ce5505..b23e011a92ed9 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -4,9 +4,9 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.core import ops from pandas.core.arrays.sparse import SparseDtype -import pandas.util.testing as tm @pytest.fixture(params=["integer", "block"]) diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 0aaf294378bf7..4cb6d48fa6ec0 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -10,8 +10,8 @@ import pandas as pd from pandas import isna +import pandas._testing as tm from pandas.core.arrays.sparse import SparseArray, SparseDtype -import pandas.util.testing as tm @pytest.fixture(params=["integer", "block"]) diff --git a/pandas/tests/arrays/sparse/test_combine_concat.py b/pandas/tests/arrays/sparse/test_combine_concat.py index 4ad1aa60e7b4f..bcca4a23ea9ed 100644 --- a/pandas/tests/arrays/sparse/test_combine_concat.py +++ b/pandas/tests/arrays/sparse/test_combine_concat.py @@ -2,7 
+2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestSparseArrayConcat: diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py index 7a85ccf271e76..a2f861d378e67 100644 --- a/pandas/tests/arrays/sparse/test_libsparse.py +++ b/pandas/tests/arrays/sparse/test_libsparse.py @@ -7,8 +7,8 @@ import pandas.util._test_decorators as td from pandas import Series +import pandas._testing as tm from pandas.core.arrays.sparse import BlockIndex, IntIndex, _make_index -import pandas.util.testing as tm TEST_LENGTH = 20 diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index c3f342f16a0bf..ec7e35e5c6db4 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def test_repr_with_NA(): diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index f2a4e73e7b6ad..4d714623db5f7 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -8,11 +8,11 @@ from pandas.core.dtypes.dtypes import registry import pandas as pd +import pandas._testing as tm from pandas.api.extensions import register_extension_dtype from pandas.api.types import is_scalar from pandas.core.arrays import PandasArray, integer_array, period_array from pandas.tests.extension.decimal import DecimalArray, DecimalDtype, to_decimal -import pandas.util.testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/arrays/test_boolean.py b/pandas/tests/arrays/test_boolean.py index abec4b42c0ffb..278b4d41262b7 100644 --- a/pandas/tests/arrays/test_boolean.py +++ b/pandas/tests/arrays/test_boolean.py @@ -6,10 +6,10 @@ import pandas.util._test_decorators as td import pandas as pd +import pandas._testing as tm from 
pandas.arrays import BooleanArray from pandas.core.arrays.boolean import coerce_to_array from pandas.tests.extension.base import BaseOpsUtil -import pandas.util.testing as tm def make_data(): diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index e9c64d04ec860..d3108c30df324 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -6,11 +6,11 @@ from pandas._libs import OutOfBoundsDatetime import pandas as pd +import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.timedeltas import TimedeltaIndex -import pandas.util.testing as tm # TODO: more freq variants diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index d5ec473f4c74d..bca629ae32270 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -9,9 +9,9 @@ from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd +import pandas._testing as tm from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns -import pandas.util.testing as tm class TestDatetimeArrayConstructor: diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index f9b002d4409ce..f172280202e64 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -6,6 +6,7 @@ from pandas.core.dtypes.generic import ABCIndexClass import pandas as pd +import pandas._testing as tm from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar from pandas.core.arrays import IntegerArray, integer_array from pandas.core.arrays.integer import ( @@ -19,7 +20,6 @@ UInt64Dtype, ) from pandas.tests.extension.base import BaseOpsUtil -import pandas.util.testing as tm def make_data(): 
diff --git a/pandas/tests/arrays/test_numpy.py b/pandas/tests/arrays/test_numpy.py index 8828a013aeea1..86793c4ec50dd 100644 --- a/pandas/tests/arrays/test_numpy.py +++ b/pandas/tests/arrays/test_numpy.py @@ -6,9 +6,9 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.arrays import PandasArray from pandas.core.arrays.numpy_ import PandasDtype -import pandas.util.testing as tm @pytest.fixture( diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index 252f278242fcc..5068e8d57e1de 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -7,8 +7,8 @@ from pandas.core.dtypes.dtypes import PeriodDtype, registry import pandas as pd +import pandas._testing as tm from pandas.core.arrays import PeriodArray, period_array -import pandas.util.testing as tm # ---------------------------------------------------------------------------- # Dtype diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 8d54ea564e1c2..667fe36ddf572 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -2,8 +2,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.core.arrays import TimedeltaArray -import pandas.util.testing as tm class TestTimedeltaArrayConstructor: diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py index a9e0473ac067a..0b7274399aafc 100644 --- a/pandas/tests/base/test_constructors.py +++ b/pandas/tests/base/test_constructors.py @@ -8,9 +8,9 @@ import pandas as pd from pandas import DataFrame, Index, Series +import pandas._testing as tm from pandas.core.accessor import PandasDelegate from pandas.core.base import NoNewAttributesMixin, PandasObject -import pandas.util.testing as tm class TestPandasDelegate: diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index 4295d89869a72..486a1daaf8b50 100644 
--- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -6,8 +6,8 @@ import pandas as pd from pandas import CategoricalIndex, Series, Timedelta, Timestamp +import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray -import pandas.util.testing as tm class TestToIterable: diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index 0d8c280d91256..2693eb12dda71 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -29,8 +29,8 @@ TimedeltaIndex, Timestamp, ) +import pandas._testing as tm from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -import pandas.util.testing as tm class Ops: diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 8438eea84baa8..886c43f84045e 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -16,6 +16,7 @@ import pandas as pd from pandas import DataFrame, Series, compat, date_range +import pandas._testing as tm from pandas.core.computation import pytables from pandas.core.computation.check import _NUMEXPR_VERSION from pandas.core.computation.engines import NumExprClobberingError, _engines @@ -33,7 +34,6 @@ _special_case_arith_ops_syms, _unary_math_ops, ) -import pandas.util.testing as tm @pytest.fixture( diff --git a/pandas/tests/dtypes/cast/test_construct_from_scalar.py b/pandas/tests/dtypes/cast/test_construct_from_scalar.py index 71f41fcf5b447..cc823a3d6e02c 100644 --- a/pandas/tests/dtypes/cast/test_construct_from_scalar.py +++ b/pandas/tests/dtypes/cast/test_construct_from_scalar.py @@ -2,7 +2,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import Categorical -import pandas.util.testing as tm +import pandas._testing as tm def test_cast_1d_array_like_from_scalar_categorical(): diff --git a/pandas/tests/dtypes/cast/test_construct_ndarray.py b/pandas/tests/dtypes/cast/test_construct_ndarray.py 
index 620e74f80d5fb..fe271392122a2 100644 --- a/pandas/tests/dtypes/cast/test_construct_ndarray.py +++ b/pandas/tests/dtypes/cast/test_construct_ndarray.py @@ -3,7 +3,7 @@ from pandas.core.dtypes.cast import construct_1d_ndarray_preserving_na -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/dtypes/cast/test_downcast.py b/pandas/tests/dtypes/cast/test_downcast.py index 99afabfa42a04..d6e6ed3022b75 100644 --- a/pandas/tests/dtypes/cast/test_downcast.py +++ b/pandas/tests/dtypes/cast/test_downcast.py @@ -6,7 +6,7 @@ from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas import DatetimeIndex, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py index 37fa003668435..2744cfa8ddc62 100644 --- a/pandas/tests/dtypes/cast/test_infer_dtype.py +++ b/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -19,7 +19,7 @@ Timestamp, date_range, ) -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/dtypes/cast/test_upcast.py b/pandas/tests/dtypes/cast/test_upcast.py index 49e850f3e87b5..bb7a7d059c7ee 100644 --- a/pandas/tests/dtypes/cast/test_upcast.py +++ b/pandas/tests/dtypes/cast/test_upcast.py @@ -4,7 +4,7 @@ from pandas.core.dtypes.cast import maybe_upcast_putmask from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("result", [Series([10, 11, 12]), [10, 11, 12], (10, 11, 12)]) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 7abb43bb25e14..f58979f807adb 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -18,6 +18,7 @@ from pandas.core.dtypes.missing import isna import pandas as pd +import pandas._testing as tm from 
pandas.conftest import ( ALL_EA_INT_DTYPES, ALL_INT_DTYPES, @@ -26,7 +27,6 @@ UNSIGNED_EA_INT_DTYPES, UNSIGNED_INT_DTYPES, ) -import pandas.util.testing as tm # EA & Actual Dtypes diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 3a933a5ca8cdc..13648322fc9c9 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -27,8 +27,8 @@ import pandas as pd from pandas import Categorical, CategoricalIndex, IntervalIndex, Series, date_range +import pandas._testing as tm from pandas.core.arrays.sparse import SparseDtype -import pandas.util.testing as tm class Base: diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index c17a8997a9b8f..6e9334996100f 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -5,7 +5,7 @@ from pandas.core.dtypes import generic as gt import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestABCClasses: diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 343dcc6849af6..d5ca35dce1c85 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -52,8 +52,8 @@ Timestamp, isna, ) +import pandas._testing as tm from pandas.core.arrays import IntegerArray -import pandas.util.testing as tm @pytest.fixture(params=[True, False], ids=str) diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index b1934ecbbceec..7d4811857db5f 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -22,7 +22,7 @@ import pandas as pd from pandas import DatetimeIndex, Float64Index, NaT, Series, TimedeltaIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm now = pd.Timestamp.now() utcnow = pd.Timestamp.now("UTC") diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 
e88c63b19003f..033cb4437cfbe 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -2,8 +2,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.tests.extension import base -import pandas.util.testing as tm pytest.importorskip("pyarrow", minversion="0.12.0") diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py index 2f808d20acd31..144b0825b39a2 100644 --- a/pandas/tests/extension/base/base.py +++ b/pandas/tests/extension/base/base.py @@ -1,4 +1,4 @@ -import pandas.util.testing as tm +import pandas._testing as tm class BaseExtensionTests: diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py index dc926d2ff6ab4..94d0ef7bbea84 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -1,7 +1,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from .base import BaseExtensionTests diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py index a29f6deeffae6..cdea96334be2a 100644 --- a/pandas/tests/extension/base/interface.py +++ b/pandas/tests/extension/base/interface.py @@ -4,7 +4,7 @@ from pandas.core.dtypes.dtypes import ExtensionDtype import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from .base import BaseExtensionTests diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 973088cb72e7a..1e427c6319cab 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -2,8 +2,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.core.sorting import nargsort -import pandas.util.testing as tm from .base import BaseExtensionTests diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py index 
21bbb365ab0f3..2393d2edcd2c6 100644 --- a/pandas/tests/extension/base/missing.py +++ b/pandas/tests/extension/base/missing.py @@ -1,7 +1,7 @@ import numpy as np import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from .base import BaseExtensionTests diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py index 8766bb771f8a2..6f433d659575a 100644 --- a/pandas/tests/extension/base/reduce.py +++ b/pandas/tests/extension/base/reduce.py @@ -3,7 +3,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from .base import BaseExtensionTests diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index b5c3abd8ce8f6..d946772a98779 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -6,8 +6,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.tests.extension import base -import pandas.util.testing as tm from .array import DecimalArray, DecimalDtype, make_data, to_decimal diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 01f2565e2ee58..4d3145109e3c2 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -4,8 +4,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.tests.extension import base -import pandas.util.testing as tm from .array import JSONArray, JSONDtype, make_data diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py index a02433da2da12..9c151b5482c9d 100644 --- a/pandas/tests/extension/test_boolean.py +++ b/pandas/tests/extension/test_boolean.py @@ -19,9 +19,9 @@ from pandas.compat.numpy import _np_version_under1p14 import pandas as pd +import pandas._testing as tm from pandas.core.arrays.boolean import BooleanDtype from pandas.tests.extension 
import base -import pandas.util.testing as tm def make_data(): diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index dff1e58641ade..336b23e54d74c 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -20,9 +20,9 @@ import pandas as pd from pandas import Categorical, CategoricalIndex, Timestamp +import pandas._testing as tm from pandas.api.types import CategoricalDtype from pandas.tests.extension import base -import pandas.util.testing as tm def make_data(): diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py index 9b5f9d64f6b67..e43650c291200 100644 --- a/pandas/tests/extension/test_common.py +++ b/pandas/tests/extension/test_common.py @@ -5,8 +5,8 @@ from pandas.core.dtypes.common import is_extension_array_dtype import pandas as pd +import pandas._testing as tm from pandas.core.arrays import ExtensionArray -import pandas.util.testing as tm class DummyDtype(dtypes.ExtensionDtype): diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 55a617caf28ce..7db38f41d4573 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -4,8 +4,8 @@ from pandas.compat.numpy import _np_version_under1p16 import pandas as pd +import pandas._testing as tm from pandas.core.arrays.numpy_ import PandasArray, PandasDtype -import pandas.util.testing as tm from . 
import base diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 6ebe71e173ec2..3c54f8fcd012b 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import SparseArray, SparseDtype +import pandas._testing as tm from pandas.tests.extension import base -import pandas.util.testing as tm def make_data(fill_value): diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index 915d6edcd8367..774eb443c45fe 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, NaT, date_range -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index b595e48797d41..5de38915f04c1 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Categorical, DataFrame, Index, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameIndexingCategorical: diff --git a/pandas/tests/frame/indexing/test_datetime.py b/pandas/tests/frame/indexing/test_datetime.py index bde35c04acf4f..a1c12be2b0180 100644 --- a/pandas/tests/frame/indexing/test_datetime.py +++ b/pandas/tests/frame/indexing/test_datetime.py @@ -1,6 +1,6 @@ import pandas as pd from pandas import DataFrame, Index, Series, date_range, notna -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameIndexingDatetimeWithTZ: diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 9a53caa491970..0734a7bb240e5 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -20,9 +20,9 @@ isna, notna, ) +import 
pandas._testing as tm import pandas.core.common as com from pandas.core.indexing import IndexingError -import pandas.util.testing as tm from pandas.tseries.offsets import BDay diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 4fea190f28d7b..df1b128dcd227 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Series, Timestamp, date_range, isna -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameIndexingWhere: diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index 1d6935795b0e4..d128a51f4b390 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameAppend: diff --git a/pandas/tests/frame/methods/test_asof.py b/pandas/tests/frame/methods/test_asof.py index 89be3779e5748..055f81c9942a6 100644 --- a/pandas/tests/frame/methods/test_asof.py +++ b/pandas/tests/frame/methods/test_asof.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, Series, Timestamp, date_range, to_datetime -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py index 48444e909ee01..34727da3b95ae 100644 --- a/pandas/tests/frame/methods/test_clip.py +++ b/pandas/tests/frame/methods/test_clip.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameClip: diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py index b5d3d60579f54..13a93e3efc48c 100644 --- 
a/pandas/tests/frame/methods/test_count.py +++ b/pandas/tests/frame/methods/test_count.py @@ -1,5 +1,5 @@ from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameCount: diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 209b4a800354d..5c13b60aae0d0 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DataFrame, Series, isna -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameCov: diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index 09510fc931546..251563e51e15a 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import Categorical, DataFrame, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameDescribe: diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py index 9293855e79b1c..43c25f4c05c2d 100644 --- a/pandas/tests/frame/methods/test_diff.py +++ b/pandas/tests/frame/methods/test_diff.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameDiff: diff --git a/pandas/tests/frame/methods/test_drop_duplicates.py b/pandas/tests/frame/methods/test_drop_duplicates.py index 29ab2e1bfd512..95624dd769b18 100644 --- a/pandas/tests/frame/methods/test_drop_duplicates.py +++ b/pandas/tests/frame/methods/test_drop_duplicates.py @@ -4,7 +4,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]]) diff --git 
a/pandas/tests/frame/methods/test_duplicated.py b/pandas/tests/frame/methods/test_duplicated.py index d5c28a416ffa7..72eec8753315c 100644 --- a/pandas/tests/frame/methods/test_duplicated.py +++ b/pandas/tests/frame/methods/test_duplicated.py @@ -4,7 +4,7 @@ import pytest from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]]) diff --git a/pandas/tests/frame/methods/test_explode.py b/pandas/tests/frame/methods/test_explode.py index 545a4b5f9421e..76c87ed355492 100644 --- a/pandas/tests/frame/methods/test_explode.py +++ b/pandas/tests/frame/methods/test_explode.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def test_error(): diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py index 5d7dc5c843ec1..0eb94afc99d94 100644 --- a/pandas/tests/frame/methods/test_isin.py +++ b/pandas/tests/frame/methods/test_isin.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameIsIn: diff --git a/pandas/tests/frame/methods/test_nlargest.py b/pandas/tests/frame/methods/test_nlargest.py index 72299ad6b2bf6..4ce474230b686 100644 --- a/pandas/tests/frame/methods/test_nlargest.py +++ b/pandas/tests/frame/methods/test_nlargest.py @@ -8,7 +8,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/frame/methods/test_pct_change.py b/pandas/tests/frame/methods/test_pct_change.py index ac13a5e146043..8f3f37fb9fff7 100644 --- a/pandas/tests/frame/methods/test_pct_change.py +++ b/pandas/tests/frame/methods/test_pct_change.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFramePctChange: 
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index c25b24121d481..9c0ab67e62a1a 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameQuantile: diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index efb0c64a4f7ac..bab2db3192b4a 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestRank: diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index b2720f9158c6b..aa91e7a489356 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py index 96ac012ce7892..0865e03cedc50 100644 --- a/pandas/tests/frame/methods/test_round.py +++ b/pandas/tests/frame/methods/test_round.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameRound: diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py index 7fb8fbbc95627..cfb17de892b1c 100644 --- a/pandas/tests/frame/methods/test_shift.py +++ b/pandas/tests/frame/methods/test_shift.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, date_range, offsets -import 
pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameShift: diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 6866aab11d2fa..ebea90e040a79 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import CategoricalDtype, DataFrame, IntervalIndex, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameSortIndex: diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index e733c01e01740..375023328a95e 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Categorical, DataFrame, NaT, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameSortValues: diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index 556d86bed8f14..7b0adceb57668 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -6,7 +6,7 @@ import pytz from pandas import DataFrame, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameToDict: diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index 18f77088677ec..8099c881c987e 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -4,7 +4,7 @@ import pytest from pandas import CategoricalDtype, DataFrame, MultiIndex, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameToRecords: diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py index 
71843053cf3a8..428b9e5068407 100644 --- a/pandas/tests/frame/methods/test_transpose.py +++ b/pandas/tests/frame/methods/test_transpose.py @@ -1,5 +1,5 @@ import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestTranspose: diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py index a021a99a45a5c..ad86ee1266874 100644 --- a/pandas/tests/frame/methods/test_truncate.py +++ b/pandas/tests/frame/methods/test_truncate.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameTruncate: diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 48b373d9c7901..ac945e6f10674 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -25,7 +25,7 @@ date_range, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameAlterAxes: diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 68d49c05eaa37..910230c737a2a 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -20,9 +20,9 @@ to_datetime, to_timedelta, ) +import pandas._testing as tm import pandas.core.algorithms as algorithms import pandas.core.nanops as nanops -import pandas.util.testing as tm def assert_stat_op_calc( diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index f6713d703e112..8093b602dd6f3 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameMisc: diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 93e165ad3d71e..d3b5d82280ced 100644 --- 
a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -11,10 +11,10 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, notna +import pandas._testing as tm from pandas.conftest import _get_cython_table_params from pandas.core.apply import frame_apply from pandas.core.base import SpecificationError -import pandas.util.testing as tm @pytest.fixture diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 5ecbe21d113b5..156cc18ded1bf 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -6,8 +6,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int -import pandas.util.testing as tm # ------------------------------------------------------------------- # Comparisons diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index d6ef3a7600abb..7effa98fd8213 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameSelectReindex: diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index eb8febb10a646..d301ed969789e 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -15,10 +15,10 @@ date_range, option_context, ) +import pandas._testing as tm from pandas.core.arrays import IntervalArray, integer_array from pandas.core.internals import ObjectBlock from pandas.core.internals.blocks import IntBlock -import pandas.util.testing as tm # Segregated collection of methods that require the BlockManager internal data # structure diff 
--git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index bfb691a8e75d3..9bad54b051d6c 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameConcatCommon: diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 4e7d8c3054cf2..071c6bb79d30e 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -25,9 +25,9 @@ date_range, isna, ) +import pandas._testing as tm from pandas.arrays import IntervalArray, PeriodArray from pandas.core.construction import create_series_with_explicit_dtype -import pandas.util.testing as tm MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"] MIXED_INT_DTYPES = [ diff --git a/pandas/tests/frame/test_cumulative.py b/pandas/tests/frame/test_cumulative.py index 2deeeb95d057d..b545d6aa8afd3 100644 --- a/pandas/tests/frame/test_cumulative.py +++ b/pandas/tests/frame/test_cumulative.py @@ -9,7 +9,7 @@ import numpy as np from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameCumulativeOps: diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index d8d56e90a2f31..06bb040224455 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -18,8 +18,8 @@ date_range, option_context, ) +import pandas._testing as tm from pandas.core.arrays import integer_array -import pandas.util.testing as tm def _check_cast(df, v): diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index a0cbc1456afa4..c6e28f3c64f12 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, 
Index, period_range -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index f9a2061aa1ff4..651929216a722 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -8,8 +8,8 @@ import pandas as pd from pandas import Categorical, DataFrame, Series, Timestamp, date_range +import pandas._testing as tm from pandas.tests.frame.common import _check_mixed_float -import pandas.util.testing as tm class TestDataFrameMissingData: diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py index 8c0dd67af4e7d..8bc2aa214e035 100644 --- a/pandas/tests/frame/test_mutate_columns.py +++ b/pandas/tests/frame/test_mutate_columns.py @@ -4,7 +4,7 @@ import pytest from pandas import DataFrame, Index, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm # Column add, remove, delete. diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 8fed695a483f5..32ead406a3e86 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameNonuniqueIndexes: diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index a4f1c0688b144..c727cb398d53e 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -6,9 +6,9 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series +import pandas._testing as tm import pandas.core.common as com from pandas.tests.frame.common import _check_mixed_float -import pandas.util.testing as tm class TestDataFrameUnaryOperators: diff --git a/pandas/tests/frame/test_period.py 
b/pandas/tests/frame/test_period.py index a545db3365e36..a6b2b334d3ec8 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -14,7 +14,7 @@ period_range, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm def _permute(obj): diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index d577ff7c71277..9cd26160ec877 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -8,8 +8,8 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, date_range +import pandas._testing as tm from pandas.core.computation.check import _NUMEXPR_INSTALLED -import pandas.util.testing as tm PARSERS = "python", "pandas" ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne) diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 91610102cf0f9..05bdec4a3a4d2 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -19,7 +19,7 @@ option_context, period_range, ) -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.formats.format as fmt diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 5acd681933914..b77d3029a5446 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameReshape: diff --git a/pandas/tests/frame/test_sort_values_level_as_str.py b/pandas/tests/frame/test_sort_values_level_as_str.py index b0287d9180859..40526ab27ac9a 100644 --- a/pandas/tests/frame/test_sort_values_level_as_str.py +++ b/pandas/tests/frame/test_sort_values_level_as_str.py @@ -4,7 +4,7 @@ from pandas.errors import PerformanceWarning from pandas import DataFrame 
-import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index e1e546256f7cd..4a436d70dc48f 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestDataFrameSubclassing: diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 9985468ac6cd8..e89f4ee07ea00 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -16,7 +16,7 @@ period_range, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm import pandas.tseries.offsets as offsets diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index 26ab4ff0ded85..b60f2052a988f 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -11,8 +11,8 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.indexes.datetimes import date_range -import pandas.util.testing as tm class TestDataFrameTimezones: diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 5c39dcc1a7659..aeff92971b42a 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -18,8 +18,8 @@ read_csv, to_datetime, ) +import pandas._testing as tm import pandas.core.common as com -import pandas.util.testing as tm from pandas.io.common import get_handle diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index d42bb53880433..7fe22e77c5bf3 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series, date_range -import 
pandas.util.testing as tm +import pandas._testing as tm from .test_generic import Generic diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index d0c9b3e7a8f76..cdb79dc6606ff 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm # ---------------------------------------------------------------------- # Generic types test cases diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py index 601fc2aa64434..8ad8355f2d530 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import MultiIndex, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm from .test_generic import Generic diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 0d8379407fef7..0b72a61ed84de 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -8,10 +8,10 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, concat +import pandas._testing as tm from pandas.core.base import SpecificationError from pandas.core.groupby.generic import _make_unique, _maybe_mangle_lambdas from pandas.core.groupby.grouper import Grouping -import pandas.util.testing as tm def test_agg_regression1(tsframe): diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 5d50c044cf9f5..5ddda264642de 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -7,8 +7,8 @@ import pandas as pd from pandas import DataFrame, Index, NaT, Series, Timedelta, Timestamp, bdate_range +import 
pandas._testing as tm from pandas.core.groupby.groupby import DataError -import pandas.util.testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 765bc3bab5d4a..2fe23e15cedc4 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -18,8 +18,8 @@ date_range, period_range, ) +import pandas._testing as tm from pandas.core.base import SpecificationError -import pandas.util.testing as tm from pandas.io.formats.printing import pprint_thing diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py index 5b8cc86513954..8901af7a90acc 100644 --- a/pandas/tests/groupby/conftest.py +++ b/pandas/tests/groupby/conftest.py @@ -2,8 +2,8 @@ import pytest from pandas import DataFrame, MultiIndex +import pandas._testing as tm from pandas.core.groupby.base import reduction_kernels, transformation_kernels -import pandas.util.testing as tm @pytest.fixture diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 050b1e7c5d3b3..53879cad629b2 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, bdate_range -import pandas.util.testing as tm +import pandas._testing as tm def test_apply_issues(): diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index fcdf599e4ba33..69be1067ce37d 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -6,7 +6,7 @@ from pandas.core.dtypes.common import ensure_int64 from pandas import Index, Series, isna -import pandas.util.testing as tm +import pandas._testing as tm def test_series_grouper(): diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 89ffcd9ee313e..40f844bdaa7c0 100644 --- 
a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -15,7 +15,7 @@ Series, qcut, ) -import pandas.util.testing as tm +import pandas._testing as tm def cartesian_product_for_groupers(result, args, names): diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 8e9554085b9ee..d88f293c99e0f 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -4,7 +4,7 @@ import pytest from pandas import DataFrame, MultiIndex, Period, Series, Timedelta, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm class TestCounting: diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index b3ee12b6691d7..c16ad812eb634 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm def test_filter_series(): diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index c41c9b4db053a..19b00502da5dc 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -20,8 +20,9 @@ date_range, isna, ) +import pandas._testing as tm import pandas.core.nanops as nanops -from pandas.util import _test_decorators as td, testing as tm +from pandas.util import _test_decorators as td @pytest.mark.parametrize("agg_func", ["any", "all"]) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 242434948fc6f..f9a77cd584d46 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -9,9 +9,9 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv +import pandas._testing as tm from pandas.core.base import SpecificationError import pandas.core.common as com -import 
pandas.util.testing as tm def test_repr(): diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 04c707acafab2..70ba21d89d22f 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -13,8 +13,8 @@ Timestamp, date_range, ) +import pandas._testing as tm from pandas.core.groupby.grouper import Grouping -import pandas.util.testing as tm # selection # -------------------------------- diff --git a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py index f5c8873ff9417..971a447b84cae 100644 --- a/pandas/tests/groupby/test_index_as_string.py +++ b/pandas/tests/groupby/test_index_as_string.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture(params=[["inner"], ["inner", "outer"]]) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index f83b284a35377..1d8883b60d4a3 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna -import pandas.util.testing as tm +import pandas._testing as tm def test_first_last_nth(df): diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index 8f0df9051fc73..3461bf6e10662 100644 --- a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -3,8 +3,8 @@ import pandas as pd from pandas import DataFrame, Series, concat +import pandas._testing as tm from pandas.core.base import DataError -import pandas.util.testing as tm def test_rank_apply(): diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 109382d97440e..6b8bd9e805a0c 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -9,9 +9,9 @@ import pandas as pd from pandas import DataFrame, 
Index, MultiIndex, Series, Timestamp, date_range +import pandas._testing as tm from pandas.core.groupby.grouper import Grouper from pandas.core.groupby.ops import BinGrouper -import pandas.util.testing as tm class TestGroupBy: diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 27dd314f0df8e..ebf75191806fb 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -18,8 +18,8 @@ concat, date_range, ) +import pandas._testing as tm from pandas.core.groupby.groupby import DataError -import pandas.util.testing as tm def assert_fp_equal(a, b): diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index c76ee09f977b5..5acf71edf2b63 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -10,7 +10,7 @@ import pytest from pandas import DataFrame, Grouper, MultiIndex, Series, date_range, to_datetime -import pandas.util.testing as tm +import pandas._testing as tm # our starting frame diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index 48ea2646c52fc..6a5e531416ecb 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -9,12 +9,12 @@ import pytest from pandas import DataFrame, Index, MultiIndex, Series, date_range +import pandas._testing as tm from pandas.core.groupby.base import ( groupby_other_methods, reduction_kernels, transformation_kernels, ) -import pandas.util.testing as tm AGG_FUNCTIONS = [ "sum", diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 306ac84ef1832..61e30d3e5c139 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -9,8 +9,8 @@ import pandas as pd from pandas import Categorical, IntervalIndex +import pandas._testing as tm from 
pandas.core.indexes.api import CategoricalIndex, Index -import pandas.util.testing as tm from ..common import Base diff --git a/pandas/tests/indexes/categorical/test_constructors.py b/pandas/tests/indexes/categorical/test_constructors.py index f3d580b7215c2..1df0874e2f947 100644 --- a/pandas/tests/indexes/categorical/test_constructors.py +++ b/pandas/tests/indexes/categorical/test_constructors.py @@ -2,7 +2,7 @@ import pytest from pandas import Categorical, CategoricalDtype, CategoricalIndex, Index -import pandas.util.testing as tm +import pandas._testing as tm class TestCategoricalIndexConstructors: diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 68cca473d6bb0..ceb3ac8b61c0b 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -23,9 +23,9 @@ UInt64Index, isna, ) +import pandas._testing as tm from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -import pandas.util.testing as tm class Base: diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py index 2a9a8bf8d824f..e3e7ff4093b76 100644 --- a/pandas/tests/indexes/conftest.py +++ b/pandas/tests/indexes/conftest.py @@ -2,8 +2,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.core.indexes.api import Index, MultiIndex -import pandas.util.testing as tm indices_dict = { "unicode": tm.makeUnicodeIndex(100), diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 6eedfca129856..3c72d34d84b28 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -3,7 +3,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from .common import Base diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index eabf293ae915f..6139726dc34e4 100644 --- 
a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -17,7 +17,7 @@ Timestamp, date_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class TestDatetimeIndex: diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 18f8a9ee60167..c75c296e724db 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -11,8 +11,8 @@ import pandas as pd from pandas import DatetimeIndex, Index, Timestamp, date_range, offsets, to_datetime +import pandas._testing as tm from pandas.core.arrays import DatetimeArray, period_array -import pandas.util.testing as tm class TestDatetimeIndex: diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 36cdaa8a6029b..f9df295284806 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import ( BDay, diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 2ccebe426e024..ca18d6fbea11a 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets -import pandas.util.testing as tm +import pandas._testing as tm randn = np.random.randn diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py index 2ff6853b98929..da1bd6f091d1a 100644 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ 
b/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -2,7 +2,7 @@ import pytest from pandas import DatetimeIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm from ..datetimelike import DatetimeLike diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index 33a744cc25ca1..f34019e06fd5f 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DatetimeIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm def test_to_native_types(): diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 02ea857550a9b..ef0d2cd2e48cc 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DatetimeIndex, Index, Timestamp, date_range, notna -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import BDay, CDay diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index afc3bed85a8d2..340f53b2868bd 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import DatetimeIndex, Index, Timestamp, date_range, offsets -import pandas.util.testing as tm +import pandas._testing as tm class TestTimeSeries: diff --git a/pandas/tests/indexes/datetimes/test_missing.py b/pandas/tests/indexes/datetimes/test_missing.py index 6d94319b33b02..3399c8eaf6750 100644 --- a/pandas/tests/indexes/datetimes/test_missing.py +++ b/pandas/tests/indexes/datetimes/test_missing.py @@ -1,7 +1,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestDatetimeIndex: diff --git 
a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index fb032947143d3..ecd4ace705e9e 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -16,8 +16,8 @@ bdate_range, date_range, ) +import pandas._testing as tm from pandas.tests.base.test_ops import Ops -import pandas.util.testing as tm from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 8d5aa64a49cf2..e30cc4449e01e 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -16,8 +16,8 @@ Timestamp, date_range, ) +import pandas._testing as tm from pandas.core.indexing import IndexingError -import pandas.util.testing as tm class TestSlicing: diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 62383555f6048..84eee2419f0b8 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DatetimeIndex, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.frequencies import to_offset diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index 3fb39b2081d83..f7960c114ec9d 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -16,7 +16,7 @@ date_range, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import BMonthEnd, Minute, MonthEnd diff --git a/pandas/tests/indexes/datetimes/test_shift.py b/pandas/tests/indexes/datetimes/test_shift.py index 6f8315debdfa9..1c87995931c62 100644 --- 
a/pandas/tests/indexes/datetimes/test_shift.py +++ b/pandas/tests/indexes/datetimes/test_shift.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DatetimeIndex, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDatetimeIndexShift: diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 3f942f9b79428..1505ac1dff29c 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -22,7 +22,7 @@ isna, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm class FixedOffset(tzinfo): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 807d0b05e8d13..a6a43697c36dc 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -30,9 +30,9 @@ isna, to_datetime, ) +import pandas._testing as tm from pandas.core.arrays import DatetimeArray from pandas.core.tools import datetimes as tools -import pandas.util.testing as tm class TestTimeConversionFormats: diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index 708cd8a4579e8..2b1742d58b77e 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -12,7 +12,7 @@ Timestamp, interval_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class Base: diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index 339bdaf79c690..91f8dddea71d7 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -2,8 +2,8 @@ import pytest from pandas import IntervalIndex, Series, date_range +import pandas._testing as tm from pandas.tests.indexes.common import Base -import pandas.util.testing as tm class TestBase(Base): diff --git 
a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index 98c1f7c6c2a8a..13a45df743cf5 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -19,9 +19,9 @@ period_range, timedelta_range, ) +import pandas._testing as tm from pandas.core.arrays import IntervalArray import pandas.core.common as com -import pandas.util.testing as tm @pytest.fixture(params=[None, "foo"]) diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py index dcc0c818182ab..7acf5c1e0906c 100644 --- a/pandas/tests/indexes/interval/test_formats.py +++ b/pandas/tests/indexes/interval/test_formats.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, IntervalIndex, Series, Timedelta, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm class TestIntervalIndexRendering: diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 15ea9a6b62c20..3c54442cf40c5 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -11,8 +11,8 @@ date_range, timedelta_range, ) +import pandas._testing as tm from pandas.core.indexes.base import InvalidIndexError -import pandas.util.testing as tm class TestGetLoc: diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 6ad7dfb22f2b3..58f88ffb16318 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -17,8 +17,8 @@ notna, timedelta_range, ) +import pandas._testing as tm import pandas.core.common as com -import pandas.util.testing as tm @pytest.fixture(scope="class", params=[None, "foo"]) diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 7891666e6cdba..2f28c33a3bbc6 
100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -15,7 +15,7 @@ interval_range, timedelta_range, ) -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import Day diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 0a92192ee6a0f..476ec1dd10b4b 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -6,7 +6,7 @@ from pandas._libs.interval import IntervalTree from pandas import compat -import pandas.util.testing as tm +import pandas._testing as tm def skipif_32bit(param): diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py index 89e733c30b1e3..3246ac6bafde9 100644 --- a/pandas/tests/indexes/interval/test_setops.py +++ b/pandas/tests/indexes/interval/test_setops.py @@ -2,7 +2,7 @@ import pytest from pandas import Index, IntervalIndex, Timestamp, interval_range -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture(scope="class", params=[None, "foo"]) diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index a6d08c845d941..8e6a360af797b 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Index, MultiIndex, date_range, period_range -import pandas.util.testing as tm +import pandas._testing as tm def test_shift(idx): diff --git a/pandas/tests/indexes/multi/test_astype.py b/pandas/tests/indexes/multi/test_astype.py index 93fdeb10b849a..29908537fbe59 100644 --- a/pandas/tests/indexes/multi/test_astype.py +++ b/pandas/tests/indexes/multi/test_astype.py @@ -3,7 +3,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype -import pandas.util.testing as tm +import pandas._testing 
as tm def test_astype(idx): diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index b02f87dc4aacb..d92cff1e10496 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -2,7 +2,7 @@ import pytest from pandas import MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_numeric_compat(idx): diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 0e4d144c0fd34..4beae4fa1a9af 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ b/pandas/tests/indexes/multi/test_constructors.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import Index, MultiIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm def test_constructor_single_level(): diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py index 64d2859cd13db..4b0895c823b8b 100644 --- a/pandas/tests/indexes/multi/test_contains.py +++ b/pandas/tests/indexes/multi/test_contains.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_contains_top_level(): diff --git a/pandas/tests/indexes/multi/test_conversion.py b/pandas/tests/indexes/multi/test_conversion.py index fab4f72dc153b..8956e6ed4996f 100644 --- a/pandas/tests/indexes/multi/test_conversion.py +++ b/pandas/tests/indexes/multi/test_conversion.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm def test_tolist(idx): diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py index 12cd0db6936f5..1acc65aef8b8a 100644 --- a/pandas/tests/indexes/multi/test_copy.py +++ b/pandas/tests/indexes/multi/test_copy.py @@ -3,7 +3,7 @@ import pytest from pandas import MultiIndex -import 
pandas.util.testing as tm +import pandas._testing as tm def assert_multiindex_copied(copy, original): diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py index 364420a292ed5..25261dd25d717 100644 --- a/pandas/tests/indexes/multi/test_drop.py +++ b/pandas/tests/indexes/multi/test_drop.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_drop(idx): diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index ee1f068b92df1..93e1de535835f 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -6,7 +6,7 @@ from pandas._libs import hashtable from pandas import DatetimeIndex, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("names", [None, ["first", "second"]]) diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py index c81af5a0c6c49..063ede028add7 100644 --- a/pandas/tests/indexes/multi/test_equivalence.py +++ b/pandas/tests/indexes/multi/test_equivalence.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Index, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm def test_equals(idx): diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py index 3a8063aed8d20..75f23fb2f32ba 100644 --- a/pandas/tests/indexes/multi/test_format.py +++ b/pandas/tests/indexes/multi/test_format.py @@ -4,7 +4,7 @@ import pandas as pd from pandas import MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_format(idx): diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index ec3c654ecb1ed..074072ae581b2 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ 
b/pandas/tests/indexes/multi/test_get_set.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import CategoricalIndex, Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def assert_matching(actual, expected, check_dtype=False): diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 9ef2a77205acc..176d47a3bdb9b 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -12,8 +12,8 @@ MultiIndex, date_range, ) +import pandas._testing as tm from pandas.core.indexes.base import InvalidIndexError -import pandas.util.testing as tm def test_slice_locs_partial(idx): diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index a8711533e806c..f2ec15e0af88c 100644 --- a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import IntervalIndex, MultiIndex, RangeIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_labels_dtypes(): diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py index 31ab521958342..062fb92c44552 100644 --- a/pandas/tests/indexes/multi/test_join.py +++ b/pandas/tests/indexes/multi/test_join.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py index f053f690e1018..a17e1e9928bff 100644 --- a/pandas/tests/indexes/multi/test_missing.py +++ b/pandas/tests/indexes/multi/test_missing.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index +import pandas._testing as tm from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -import pandas.util.testing as tm 
def test_fillna(idx): diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py index 47f2ec4c8a418..479b5ef0211a0 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def check_level_names(index, names): diff --git a/pandas/tests/indexes/multi/test_partial_indexing.py b/pandas/tests/indexes/multi/test_partial_indexing.py index 5db1296d828ca..b00018d2ceb69 100644 --- a/pandas/tests/indexes/multi/test_partial_indexing.py +++ b/pandas/tests/indexes/multi/test_partial_indexing.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm def test_partial_string_timestamp_multiindex(): diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py index 513efa8941de8..ceb14aa82a76c 100644 --- a/pandas/tests/indexes/multi/test_reindex.py +++ b/pandas/tests/indexes/multi/test_reindex.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_reindex(idx): diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py index 37df420e9ea2e..2e39c714ca7af 100644 --- a/pandas/tests/indexes/multi/test_reshape.py +++ b/pandas/tests/indexes/multi/test_reshape.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_insert(idx): diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index 835784054261e..841e3b3f17b38 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import 
MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("case", [0.5, "xxx"]) diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py index 3dee1dbecf3ba..062c0ba9f4759 100644 --- a/pandas/tests/indexes/multi/test_sorting.py +++ b/pandas/tests/indexes/multi/test_sorting.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_sortlevel(idx): diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py index fd6013ab5ae08..88e800d66f3ad 100644 --- a/pandas/tests/indexes/period/test_asfreq.py +++ b/pandas/tests/indexes/period/test_asfreq.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, PeriodIndex, Series, period_range -import pandas.util.testing as tm +import pandas._testing as tm class TestPeriodIndex: diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py index fa57ec2b1f7ca..ec386dd9dd11c 100644 --- a/pandas/tests/indexes/period/test_astype.py +++ b/pandas/tests/indexes/period/test_astype.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Index, Int64Index, NaT, Period, PeriodIndex, period_range -import pandas.util.testing as tm +import pandas._testing as tm class TestPeriodIndexAsType: diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py index d87e49e3cba2a..27ee915e48e5c 100644 --- a/pandas/tests/indexes/period/test_constructors.py +++ b/pandas/tests/indexes/period/test_constructors.py @@ -7,8 +7,8 @@ import pandas as pd from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range +import pandas._testing as tm from pandas.core.arrays import PeriodArray -import pandas.util.testing as tm class TestPeriodIndex: diff --git 
a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index 2a88b79f381c4..5db373a9f07ae 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import PeriodIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_to_native_types(): diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index e17f0af24760c..8b5a2958c4c61 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range -import pandas.util.testing as tm +import pandas._testing as tm class TestGetItem: diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 962e674fa607f..427d9ab712320 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -3,9 +3,9 @@ import pandas as pd from pandas import DatetimeIndex, Index, NaT, PeriodIndex, Series +import pandas._testing as tm from pandas.core.arrays import PeriodArray from pandas.tests.base.test_ops import Ops -import pandas.util.testing as tm class TestPeriodIndexOps(Ops): diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py index 501c2a4d8edcc..9ca2dd169416f 100644 --- a/pandas/tests/indexes/period/test_partial_slicing.py +++ b/pandas/tests/indexes/period/test_partial_slicing.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Period, Series, period_range -import pandas.util.testing as tm +import pandas._testing as tm class TestPeriodIndex: diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index da2f04d45fdac..af5aa54c60476 100644 --- 
a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -17,7 +17,7 @@ offsets, period_range, ) -import pandas.util.testing as tm +import pandas._testing as tm from ..datetimelike import DatetimeLike diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py index 828fab08daceb..2c3d22198df9f 100644 --- a/pandas/tests/indexes/period/test_period_range.py +++ b/pandas/tests/indexes/period/test_period_range.py @@ -1,7 +1,7 @@ import pytest from pandas import NaT, Period, PeriodIndex, date_range, period_range -import pandas.util.testing as tm +import pandas._testing as tm class TestPeriodRange: diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py index 7956b9f26e6ef..d9809f0f75611 100644 --- a/pandas/tests/indexes/period/test_scalar_compat.py +++ b/pandas/tests/indexes/period/test_scalar_compat.py @@ -1,7 +1,7 @@ """Tests for PeriodIndex behaving like a vectorized Period scalar""" from pandas import Timedelta, date_range, period_range -import pandas.util.testing as tm +import pandas._testing as tm class TestPeriodIndexOps: diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index 173d61849b126..1ec53c1dac81c 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -3,8 +3,8 @@ import pandas as pd from pandas import Index, PeriodIndex, date_range, period_range +import pandas._testing as tm import pandas.core.indexes.period as period -import pandas.util.testing as tm def _permute(obj): diff --git a/pandas/tests/indexes/period/test_shift.py b/pandas/tests/indexes/period/test_shift.py index 7543f85c6d138..5689e98c33455 100644 --- a/pandas/tests/indexes/period/test_shift.py +++ b/pandas/tests/indexes/period/test_shift.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import PeriodIndex, period_range -import 
pandas.util.testing as tm +import pandas._testing as tm class TestPeriodIndexShift: diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index a9c0ecd1a3041..fc861b88d1f1b 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -17,8 +17,8 @@ period_range, to_datetime, ) +import pandas._testing as tm import pandas.core.indexes.period as period -import pandas.util.testing as tm class TestPeriodRepresentation: diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py index 5067b6c74871b..ba1de6d551d6b 100644 --- a/pandas/tests/indexes/ranges/test_constructors.py +++ b/pandas/tests/indexes/ranges/test_constructors.py @@ -4,7 +4,7 @@ import pytest from pandas import Index, RangeIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestRangeIndexConstructors: diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index f7abdf53e0975..8d98ab18963b6 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Float64Index, Index, Int64Index, RangeIndex -import pandas.util.testing as tm +import pandas._testing as tm from ..test_numeric import Numeric diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 5c1e461c9fcf0..5bedc4089feba 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -4,7 +4,7 @@ import pytest from pandas import Index, Int64Index, RangeIndex -import pandas.util.testing as tm +import pandas._testing as tm class TestRangeIndexSetOps: diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 6ec35a32d74ce..4a773cc1c6f49 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py 
@@ -32,6 +32,7 @@ isna, period_range, ) +import pandas._testing as tm from pandas.core.algorithms import safe_sort from pandas.core.indexes.api import ( Index, @@ -42,7 +43,6 @@ ) from pandas.tests.indexes.common import Base from pandas.tests.indexes.conftest import indices_dict -import pandas.util.testing as tm class TestIndex(Base): diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 82ef71efa70d0..7e30233353553 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import CategoricalIndex, MultiIndex, RangeIndex -import pandas.util.testing as tm +import pandas._testing as tm class TestCommon: diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 7187733fc91c3..69a3983128a51 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -8,9 +8,9 @@ import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index +import pandas._testing as tm from pandas.api.types import pandas_dtype from pandas.tests.indexes.common import Base -import pandas.util.testing as tm class Numeric(Base): diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py index 3d24c70afdda2..583556656ac87 100644 --- a/pandas/tests/indexes/test_numpy_compat.py +++ b/pandas/tests/indexes/test_numpy_compat.py @@ -12,8 +12,8 @@ _np_version_under1p17, _np_version_under1p18, ) +import pandas._testing as tm from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -import pandas.util.testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index a7e2363ec422e..abfa413d56655 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -11,9 +11,9 @@ import pandas as pd from pandas import Float64Index, Int64Index, RangeIndex, 
UInt64Index +import pandas._testing as tm from pandas.api.types import pandas_dtype from pandas.tests.indexes.conftest import indices_dict -import pandas.util.testing as tm COMPATIBLE_INCONSISTENT_PAIRS = { (Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex), diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index e479d93af2902..82c9d995c9c7c 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -13,7 +13,7 @@ TimedeltaIndex, timedelta_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class TestTimedeltaIndex: diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py index ff6ee051755bb..e9540c950a32b 100644 --- a/pandas/tests/indexes/timedeltas/test_constructors.py +++ b/pandas/tests/indexes/timedeltas/test_constructors.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import Timedelta, TimedeltaIndex, timedelta_range, to_timedelta +import pandas._testing as tm from pandas.core.arrays import TimedeltaArray -import pandas.util.testing as tm class TestTimedeltaIndex: diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 17ab85033acfb..0114dfef548de 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Index, Timedelta, TimedeltaIndex, timedelta_range -import pandas.util.testing as tm +import pandas._testing as tm class TestGetItem: diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 56043cf3edb2d..25f27da758ad8 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -7,8 +7,8 @@ import pandas as pd from pandas import Series, TimedeltaIndex, timedelta_range 
+import pandas._testing as tm from pandas.tests.base.test_ops import Ops -import pandas.util.testing as tm from pandas.tseries.offsets import Day, Hour diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py index 4448b5e39684b..29e2c7dd20be0 100644 --- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py +++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Series, Timedelta, timedelta_range -import pandas.util.testing as tm +import pandas._testing as tm class TestSlicing: diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py index 38f1d2c7d4a1b..44f4a2adedaad 100644 --- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py +++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range -import pandas.util.testing as tm +import pandas._testing as tm class TestVectorizedTimedelta: diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py index 34db7ed419ddb..b2024d04efc66 100644 --- a/pandas/tests/indexes/timedeltas/test_setops.py +++ b/pandas/tests/indexes/timedeltas/test_setops.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Int64Index, TimedeltaIndex, timedelta_range -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import Hour diff --git a/pandas/tests/indexes/timedeltas/test_shift.py b/pandas/tests/indexes/timedeltas/test_shift.py index 048b29c0da501..98933ff0423ab 100644 --- a/pandas/tests/indexes/timedeltas/test_shift.py +++ b/pandas/tests/indexes/timedeltas/test_shift.py @@ -4,7 +4,7 @@ import pandas as pd from pandas import TimedeltaIndex -import pandas.util.testing as tm +import pandas._testing as tm class TestTimedeltaIndexShift: diff --git 
a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index fa74ff2d30368..7703fbbbcad2a 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -14,7 +14,7 @@ date_range, timedelta_range, ) -import pandas.util.testing as tm +import pandas._testing as tm from ..datetimelike import DatetimeLike diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py index 1c1d0f1a735cf..1cef9de6a3a77 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import timedelta_range, to_timedelta -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import Day, Second diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py index 5bd7a2a583b84..223bde8b0e2c2 100644 --- a/pandas/tests/indexes/timedeltas/test_tools.py +++ b/pandas/tests/indexes/timedeltas/test_tools.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Series, TimedeltaIndex, isna, to_timedelta -import pandas.util.testing as tm +import pandas._testing as tm class TestTimedeltas: diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 08e8dbad4e102..3c027b035c2b8 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -7,7 +7,7 @@ from pandas.core.dtypes.common import is_scalar from pandas import DataFrame, Float64Index, MultiIndex, Series, UInt64Index, date_range -import pandas.util.testing as tm +import pandas._testing as tm def _mklbl(prefix, n): diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 5d23236207f94..634020982b1c2 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ 
b/pandas/tests/indexing/interval/test_interval.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, IntervalIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestIntervalIndex: diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py index a86a9d16d3f9f..43036fbbd9844 100644 --- a/pandas/tests/indexing/interval/test_interval_new.py +++ b/pandas/tests/indexing/interval/test_interval_new.py @@ -4,7 +4,7 @@ import pytest from pandas import Interval, IntervalIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestIntervalIndex: diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py index e58e6ed0d5d83..e6d5a9eb84410 100644 --- a/pandas/tests/indexing/multiindex/conftest.py +++ b/pandas/tests/indexing/multiindex/conftest.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py index e0206c8e7f6aa..4051d7c5fe374 100644 --- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py +++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py @@ -2,8 +2,8 @@ import pytest from pandas import DataFrame, MultiIndex, Series +import pandas._testing as tm import pandas.core.common as com -import pandas.util.testing as tm def test_detect_chained_assignment(): diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py index 519a1eb5b16d8..8ea825da8f94f 100644 --- a/pandas/tests/indexing/multiindex/test_getitem.py +++ b/pandas/tests/indexing/multiindex/test_getitem.py @@ -2,8 +2,8 @@ import pytest from pandas import DataFrame, Index, MultiIndex, Series +import pandas._testing as tm from 
pandas.core.indexing import IndexingError -import pandas.util.testing as tm # ---------------------------------------------------------------------------- # test indexing of Series with multi-level Index diff --git a/pandas/tests/indexing/multiindex/test_iloc.py b/pandas/tests/indexing/multiindex/test_iloc.py index 2c2e4d06f1ae3..9859c7235c380 100644 --- a/pandas/tests/indexing/multiindex/test_iloc.py +++ b/pandas/tests/indexing/multiindex/test_iloc.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py index aab44daf8d17f..8ea1cebd7bf7b 100644 --- a/pandas/tests/indexing/multiindex/test_indexing_slow.py +++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.slow diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py index 35f3137dac059..01b0b392d52a3 100644 --- a/pandas/tests/indexing/multiindex/test_ix.py +++ b/pandas/tests/indexing/multiindex/test_ix.py @@ -4,7 +4,7 @@ from pandas.errors import PerformanceWarning from pandas import DataFrame, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm class TestMultiIndex: diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index da7d89a15125b..ce427116ea343 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -3,8 +3,8 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series +import pandas._testing as tm from pandas.core.indexing import IndexingError -import pandas.util.testing as tm @pytest.fixture diff --git 
a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py index 8c6afef1234da..3339cb5f3150d 100644 --- a/pandas/tests/indexing/multiindex/test_multiindex.py +++ b/pandas/tests/indexing/multiindex/test_multiindex.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestMultiIndexBasic: diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py index 05ea949721b65..9d181bdcb9491 100644 --- a/pandas/tests/indexing/multiindex/test_partial.py +++ b/pandas/tests/indexing/multiindex/test_partial.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm class TestMultiIndexPartial: diff --git a/pandas/tests/indexing/multiindex/test_set_ops.py b/pandas/tests/indexing/multiindex/test_set_ops.py index 66cb0d0d46380..f2cbfadb3cfa5 100644 --- a/pandas/tests/indexing/multiindex/test_set_ops.py +++ b/pandas/tests/indexing/multiindex/test_set_ops.py @@ -1,7 +1,7 @@ from numpy.random import randn from pandas import DataFrame, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestMultiIndexSetOps: diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index 7fc95ba62a888..cb6c3a71fecc4 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -4,8 +4,8 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna +import pandas._testing as tm import pandas.core.common as com -import pandas.util.testing as tm class TestMultiIndexSetItem: diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index ee0f160b33cf1..6fa9d3bd2cdbb 100644 --- 
a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -5,9 +5,9 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp +import pandas._testing as tm from pandas.core.indexing import _non_reducing_slice from pandas.tests.indexing.common import _mklbl -import pandas.util.testing as tm class TestMultiIndexSlicers: diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py index 5b8300827609a..4bec0f429a34e 100644 --- a/pandas/tests/indexing/multiindex/test_sorted.py +++ b/pandas/tests/indexing/multiindex/test_sorted.py @@ -2,7 +2,7 @@ from numpy.random import randn from pandas import DataFrame, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestMultiIndexSorted: diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py index ffbe1bb785cda..db8c0c643a623 100644 --- a/pandas/tests/indexing/multiindex/test_xs.py +++ b/pandas/tests/indexing/multiindex/test_xs.py @@ -2,8 +2,8 @@ import pytest from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range +import pandas._testing as tm import pandas.core.common as com -import pandas.util.testing as tm @pytest.fixture diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py index 81dedfdc74409..621417eb38d94 100644 --- a/pandas/tests/indexing/test_callable.py +++ b/pandas/tests/indexing/test_callable.py @@ -1,7 +1,7 @@ import numpy as np import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestIndexingCallable: diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 40fd6575abf44..4a33dbd8fc7bd 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -16,8 +16,8 @@ Timestamp, conftest, ) +import pandas._testing as tm from 
pandas.api.types import CategoricalDtype as CDT -import pandas.util.testing as tm class TestCategoricalIndex: diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 760bb655534b2..785448e910217 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -3,8 +3,8 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range, option_context +import pandas._testing as tm import pandas.core.common as com -import pandas.util.testing as tm class TestCaching: diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 256aaef8eb5a7..8843f6c08fe80 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -7,7 +7,7 @@ import pandas.compat as compat import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm ############################################################### # Index / Series common tests which may trigger dtype coercions diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index f2e3f7f6b3723..42f992339f036 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestDatetimeIndex: diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 52d0e30f0bcad..75bf23b39a935 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestFloatIndexers: diff --git a/pandas/tests/indexing/test_iloc.py 
b/pandas/tests/indexing/test_iloc.py index d4731bcdc5b46..26dedf02e7333 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -8,10 +8,10 @@ import pandas as pd from pandas import DataFrame, Series, concat, date_range, isna +import pandas._testing as tm from pandas.api.types import is_scalar from pandas.core.indexing import IndexingError from pandas.tests.indexing.common import Base -import pandas.util.testing as tm class TestiLoc(Base): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index ea003a72490f9..be921c813e2fa 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -13,11 +13,11 @@ import pandas as pd from pandas import DataFrame, Index, NaT, Series +import pandas._testing as tm from pandas.core.generic import NDFrame from pandas.core.indexers import validate_indices from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice from pandas.tests.indexing.common import Base, _mklbl -import pandas.util.testing as tm # ------------------------------------------------------------------------ # Indexing test cases diff --git a/pandas/tests/indexing/test_indexing_engines.py b/pandas/tests/indexing/test_indexing_engines.py index 7303c1ff3d111..edb5d7d7f3a57 100644 --- a/pandas/tests/indexing/test_indexing_engines.py +++ b/pandas/tests/indexing/test_indexing_engines.py @@ -2,7 +2,7 @@ from pandas._libs import algos as libalgos, index as libindex -import pandas.util.testing as tm +import pandas._testing as tm class TestNumericEngine: diff --git a/pandas/tests/indexing/test_indexing_slow.py b/pandas/tests/indexing/test_indexing_slow.py index bf8c6afd00561..2ffa44bec14a6 100644 --- a/pandas/tests/indexing/test_indexing_slow.py +++ b/pandas/tests/indexing/test_indexing_slow.py @@ -1,7 +1,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm class TestIndexingSlow: diff 
--git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 802bc43ae8052..c74435e9a9347 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -7,9 +7,9 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range +import pandas._testing as tm from pandas.api.types import is_scalar from pandas.tests.indexing.common import Base -import pandas.util.testing as tm class TestLoc(Base): diff --git a/pandas/tests/indexing/test_na_indexing.py b/pandas/tests/indexing/test_na_indexing.py index 4b92df581d164..befe4fee8ecf8 100644 --- a/pandas/tests/indexing/test_na_indexing.py +++ b/pandas/tests/indexing/test_na_indexing.py @@ -1,7 +1,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 15c65be37e0d9..5fda759020f1a 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestPartialSetting: diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index ddaea5b597d6d..362a2c00e6775 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -4,8 +4,8 @@ import pytest from pandas import DataFrame, Series, Timedelta, Timestamp, date_range +import pandas._testing as tm from pandas.tests.indexing.common import Base -import pandas.util.testing as tm class TestScalar(Base): diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py index 5c9865ddc7090..dd4750123c0b5 100644 --- a/pandas/tests/indexing/test_timedelta.py +++ b/pandas/tests/indexing/test_timedelta.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import 
pandas.util.testing as tm +import pandas._testing as tm class TestTimedeltaIndexing: diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 551782d0b363a..f20e9ef6d7b57 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -19,10 +19,10 @@ Series, SparseArray, ) +import pandas._testing as tm import pandas.core.algorithms as algos from pandas.core.arrays import DatetimeArray, TimedeltaArray from pandas.core.internals import BlockManager, SingleBlockManager, make_block -import pandas.util.testing as tm @pytest.fixture diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 3f034107ef24f..7810778602e12 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -2,7 +2,7 @@ import pytest -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parsers import read_csv diff --git a/pandas/tests/io/excel/conftest.py b/pandas/tests/io/excel/conftest.py index 4495ba9b80b67..a257735dc1ec5 100644 --- a/pandas/tests/io/excel/conftest.py +++ b/pandas/tests/io/excel/conftest.py @@ -2,7 +2,7 @@ import pandas.util._test_decorators as td -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parsers import read_csv diff --git a/pandas/tests/io/excel/test_odf.py b/pandas/tests/io/excel/test_odf.py index 6e5610f4f5838..b9a3e8b59b133 100644 --- a/pandas/tests/io/excel/test_odf.py +++ b/pandas/tests/io/excel/test_odf.py @@ -4,7 +4,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm pytest.importorskip("odf") diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index e8c60870e2a85..10ed192062d9c 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm 
from pandas.io.excel import ExcelWriter, _OpenpyxlWriter diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 4f731eb2b272c..629d3d02028bd 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -13,7 +13,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm @contextlib.contextmanager diff --git a/pandas/tests/io/excel/test_style.py b/pandas/tests/io/excel/test_style.py index 41363bf13ed4e..88f4c3736bc0d 100644 --- a/pandas/tests/io/excel/test_style.py +++ b/pandas/tests/io/excel/test_style.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.excel import ExcelWriter from pandas.io.formats.excel import ExcelFormatter diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index c394bc87c99e7..55b987a599670 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, get_option, set_option -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.excel import ( ExcelFile, diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py index e04dfc97d4968..d1f900a2dc58b 100644 --- a/pandas/tests/io/excel/test_xlrd.py +++ b/pandas/tests/io/excel/test_xlrd.py @@ -1,7 +1,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.excel import ExcelFile diff --git a/pandas/tests/io/excel/test_xlsxwriter.py b/pandas/tests/io/excel/test_xlsxwriter.py index deb72cc230669..b6f791434a92b 100644 --- a/pandas/tests/io/excel/test_xlsxwriter.py +++ b/pandas/tests/io/excel/test_xlsxwriter.py @@ -3,7 +3,7 @@ import pytest from pandas import DataFrame -import 
pandas.util.testing as tm +import pandas._testing as tm from pandas.io.excel import ExcelWriter diff --git a/pandas/tests/io/excel/test_xlwt.py b/pandas/tests/io/excel/test_xlwt.py index c6af78c2704d8..01feab08eb5e3 100644 --- a/pandas/tests/io/excel/test_xlwt.py +++ b/pandas/tests/io/excel/test_xlwt.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.excel import ExcelWriter, _XlwtWriter diff --git a/pandas/tests/io/formats/test_css.py b/pandas/tests/io/formats/test_css.py index 0d18b57c6d6e5..7008cef7b28fa 100644 --- a/pandas/tests/io/formats/test_css.py +++ b/pandas/tests/io/formats/test_css.py @@ -1,6 +1,6 @@ import pytest -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.formats.css import CSSResolver, CSSWarning diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py index 2edbff3766c9d..6801316ada8a3 100644 --- a/pandas/tests/io/formats/test_eng_formatting.py +++ b/pandas/tests/io/formats/test_eng_formatting.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.formats.format as fmt diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 3b9deeca54af9..b70a006ca3603 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -35,7 +35,7 @@ reset_option, set_option, ) -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.formats.format as fmt import pandas.io.formats.printing as printing diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index f3fe3debdd217..d4c5414c301f9 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame 
-import pandas.util.testing as tm +import pandas._testing as tm jinja2 = pytest.importorskip("jinja2") from pandas.io.formats.style import Styler, _get_level_lengths # noqa # isort:skip diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 41cb7a73e4224..a211ac11cf725 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DataFrame, compat -import pandas.util.testing as tm +import pandas._testing as tm class TestToCSV: diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index a6c673e8c51d6..883240b74c32c 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -5,7 +5,7 @@ import pytest -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.formats.css import CSSWarning from pandas.io.formats.excel import CSSToExcelConverter diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index a2a577a0753f7..d6e0b53d4c176 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, option_context -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.formats.format as fmt diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index ea8688517bd93..bd681032f155d 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestToLatex: diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 5c5c04c35d6b7..182c21ed1d416 100644 --- a/pandas/tests/io/json/test_compression.py 
+++ b/pandas/tests/io/json/test_compression.py @@ -3,7 +3,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def test_compression_roundtrip(compression): diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index fba74d8ebcf97..2ac2acc6748d1 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.json._table_schema import ( as_json_table_type, diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 46dd1e94aa739..efb95a0cb2a42 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -4,7 +4,7 @@ import pytest from pandas import DataFrame, Index, json_normalize -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.json._normalize import nested_to_record diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 532d5215be902..09d8a1d3f10ea 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json -import pandas.util.testing as tm +import pandas._testing as tm _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py index 90da175855c34..e531457627342 100644 --- a/pandas/tests/io/json/test_readlines.py +++ b/pandas/tests/io/json/test_readlines.py @@ -4,7 +4,7 @@ import pandas as pd from pandas import DataFrame, read_json -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.json._json import JsonReader diff --git 
a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index dab2882499634..f50492c58a370 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -17,7 +17,7 @@ import pandas.compat as compat from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm def _clean_dict(d): diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 77b52eb90d61f..2d11f619d508b 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -17,7 +17,7 @@ import pandas.util._test_decorators as td from pandas import DataFrame, concat -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/io/parser/test_comment.py b/pandas/tests/io/parser/test_comment.py index e1d422142ab0b..60e32d7c27200 100644 --- a/pandas/tests/io/parser/test_comment.py +++ b/pandas/tests/io/parser/test_comment.py @@ -8,7 +8,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("na_values", [None, ["NaN"]]) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index c431cbc2171ca..1678b1ecf8700 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -18,7 +18,7 @@ from pandas.errors import DtypeWarning, EmptyDataError, ParserError from pandas import DataFrame, Index, MultiIndex, Series, compat, concat -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py index 9d0eab0b9a907..e2b6fdd3af2ff 100644 --- a/pandas/tests/io/parser/test_compression.py +++ 
b/pandas/tests/io/parser/test_compression.py @@ -9,7 +9,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/io/parser/test_converters.py b/pandas/tests/io/parser/test_converters.py index 2a3b1dc82fc59..88b400d9a11df 100644 --- a/pandas/tests/io/parser/test_converters.py +++ b/pandas/tests/io/parser/test_converters.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, Index -import pandas.util.testing as tm +import pandas._testing as tm def test_converters_type_must_be_dict(all_parsers): diff --git a/pandas/tests/io/parser/test_dialect.py b/pandas/tests/io/parser/test_dialect.py index dc10352bc6460..cc65def0fd096 100644 --- a/pandas/tests/io/parser/test_dialect.py +++ b/pandas/tests/io/parser/test_dialect.py @@ -11,7 +11,7 @@ from pandas.errors import ParserWarning from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py index a68d46e8a6c15..2133f8116a95e 100644 --- a/pandas/tests/io/parser/test_dtypes.py +++ b/pandas/tests/io/parser/test_dtypes.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("dtype", [str, object]) diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index 214b93b6f0628..7dc106ef0c186 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -12,7 +12,7 @@ from pandas.errors import ParserError from pandas import DataFrame, Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_read_with_bad_header(all_parsers): diff --git a/pandas/tests/io/parser/test_index_col.py 
b/pandas/tests/io/parser/test_index_col.py index 66e00f4eb6c1c..7361e2ca6868f 100644 --- a/pandas/tests/io/parser/test_index_col.py +++ b/pandas/tests/io/parser/test_index_col.py @@ -8,7 +8,7 @@ import pytest from pandas import DataFrame, Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("with_header", [True, False]) diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py index d144421090274..5c4e642115798 100644 --- a/pandas/tests/io/parser/test_mangle_dupes.py +++ b/pandas/tests/io/parser/test_mangle_dupes.py @@ -8,7 +8,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("kwargs", [dict(), dict(mangle_dupe_cols=True)]) diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py index c94adf9da0bf3..64ccaf60ec230 100644 --- a/pandas/tests/io/parser/test_multi_thread.py +++ b/pandas/tests/io/parser/test_multi_thread.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm def _construct_dataframe(num_rows): diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py index 353d309a84823..c9a0889cdd8b7 100644 --- a/pandas/tests/io/parser/test_na_values.py +++ b/pandas/tests/io/parser/test_na_values.py @@ -10,7 +10,7 @@ from pandas._libs.parsers import STR_NA_VALUES from pandas import DataFrame, Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_string_nas(all_parsers): diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 57e2950b06ce8..b8d66874bc660 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -11,7 +11,7 @@ import pandas.util._test_decorators as td from pandas import DataFrame -import 
pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parsers import read_csv diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 36391e19a102e..b01b22e811ee3 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -20,8 +20,8 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series +import pandas._testing as tm from pandas.core.indexes.datetimes import date_range -import pandas.util.testing as tm import pandas.io.date_converters as conv diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py index 5b381e43e3e19..7367b19b40dc3 100644 --- a/pandas/tests/io/parser/test_python_parser_only.py +++ b/pandas/tests/io/parser/test_python_parser_only.py @@ -13,7 +13,7 @@ from pandas.errors import ParserError from pandas import DataFrame, Index, MultiIndex -import pandas.util.testing as tm +import pandas._testing as tm def test_default_separator(python_parser_only): diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py index 94858226d0b44..14773dfbea20e 100644 --- a/pandas/tests/io/parser/test_quoting.py +++ b/pandas/tests/io/parser/test_quoting.py @@ -11,7 +11,7 @@ from pandas.errors import ParserError from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index 9ddaccc4d38b7..27aef2376e87d 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parsers import EmptyDataError, read_csv, read_fwf diff --git a/pandas/tests/io/parser/test_skiprows.py 
b/pandas/tests/io/parser/test_skiprows.py index d4f219d13ac53..fdccef1127c7e 100644 --- a/pandas/tests/io/parser/test_skiprows.py +++ b/pandas/tests/io/parser/test_skiprows.py @@ -12,7 +12,7 @@ from pandas.errors import EmptyDataError from pandas import DataFrame, Index -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("skiprows", [list(range(6)), 6]) diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index e34f1010d690e..8d5af85c20d33 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -12,7 +12,7 @@ from pandas._libs.parsers import TextReader from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parsers import TextFileReader, read_csv diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 07ab41b47bf27..267fae760398a 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -12,7 +12,7 @@ from pandas.errors import ParserError -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.parsers as parsers from pandas.io.parsers import read_csv diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py index 539fdf2470c51..979eb4702cc84 100644 --- a/pandas/tests/io/parser/test_usecols.py +++ b/pandas/tests/io/parser/test_usecols.py @@ -10,7 +10,7 @@ from pandas._libs.tslib import Timestamp from pandas import DataFrame, Index -import pandas.util.testing as tm +import pandas._testing as tm _msg_validate_usecols_arg = ( "'usecols' must either be list-like " diff --git a/pandas/tests/io/pytables/conftest.py b/pandas/tests/io/pytables/conftest.py index 6164f5d0722cc..214f95c6fb441 100644 --- a/pandas/tests/io/pytables/conftest.py +++ b/pandas/tests/io/pytables/conftest.py @@ -1,6 +1,6 @@ import pytest -import 
pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/io/pytables/test_compat.py b/pandas/tests/io/pytables/test_compat.py index a82e21532eddb..c7200385aa998 100644 --- a/pandas/tests/io/pytables/test_compat.py +++ b/pandas/tests/io/pytables/test_compat.py @@ -1,8 +1,8 @@ import pytest import pandas as pd +import pandas._testing as tm from pandas.tests.io.pytables.common import ensure_clean_path -import pandas.util.testing as tm tables = pytest.importorskip("tables") diff --git a/pandas/tests/io/pytables/test_complex.py b/pandas/tests/io/pytables/test_complex.py index 91ee1061a5ef1..543940e674dba 100644 --- a/pandas/tests/io/pytables/test_complex.py +++ b/pandas/tests/io/pytables/test_complex.py @@ -7,8 +7,8 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.tests.io.pytables.common import ensure_clean_path, ensure_clean_store -import pandas.util.testing as tm from pandas.io.pytables import read_hdf diff --git a/pandas/tests/io/pytables/test_pytables_missing.py b/pandas/tests/io/pytables/test_pytables_missing.py index 4ceb80889c989..9adb0a6d227da 100644 --- a/pandas/tests/io/pytables/test_pytables_missing.py +++ b/pandas/tests/io/pytables/test_pytables_missing.py @@ -3,7 +3,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @td.skip_if_installed("tables") diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index c8a8e738faa9c..41b96cea1538c 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -33,6 +33,7 @@ isna, timedelta_range, ) +import pandas._testing as tm from pandas.tests.io.pytables.common import ( _maybe_remove, create_tempfile, @@ -42,7 +43,6 @@ safe_remove, tables, ) -import pandas.util.testing as tm from pandas.io.pytables import ( ClosedFileError, diff --git 
a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py index 1acb0ac6e06d2..2bf22d982e5fe 100644 --- a/pandas/tests/io/pytables/test_timezones.py +++ b/pandas/tests/io/pytables/test_timezones.py @@ -7,12 +7,12 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Series, Timestamp, date_range +import pandas._testing as tm from pandas.tests.io.pytables.common import ( _maybe_remove, ensure_clean_path, ensure_clean_store, ) -import pandas.util.testing as tm def _compare_with_tz(a, b): diff --git a/pandas/tests/io/sas/test_sas.py b/pandas/tests/io/sas/test_sas.py index fcd2e0e35ad9e..5d2643c20ceb2 100644 --- a/pandas/tests/io/sas/test_sas.py +++ b/pandas/tests/io/sas/test_sas.py @@ -3,7 +3,7 @@ import pytest from pandas import read_sas -import pandas.util.testing as tm +import pandas._testing as tm class TestSas: diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index c89342627f796..62e9ac6929c8e 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -10,7 +10,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm # https://github.com/cython/cython/issues/1720 diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index a52b22122ba81..ee97f08ef9400 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -4,7 +4,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.sas.sasreader import read_sas diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 666dfd245acaa..a69e5556f3e85 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, get_option, read_clipboard -import pandas.util.testing as tm +import pandas._testing 
as tm from pandas.io.clipboard import PyperclipException, clipboard_get, clipboard_set diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 59d7f6f904337..a126f83164ce5 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -12,7 +12,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.common as icom diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index e17a32cbc8b68..fb81e57912dac 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -6,7 +6,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.common as icom diff --git a/pandas/tests/io/test_date_converters.py b/pandas/tests/io/test_date_converters.py index 2fa5e3b30d6af..cdb8eca02a3e5 100644 --- a/pandas/tests/io/test_date_converters.py +++ b/pandas/tests/io/test_date_converters.py @@ -2,7 +2,7 @@ import numpy as np -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.date_converters as conv diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index e06f2c31a2870..39d6bc69e7c00 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -5,7 +5,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.feather_format import read_feather, to_feather # noqa: E402 isort:skip diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index 85ac56c8193a6..557a9d5c13987 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -5,8 +5,8 @@ import pytest from pandas import DataFrame, date_range, read_csv +import pandas._testing as tm from pandas.util import _test_decorators as td -import pandas.util.testing as tm from pandas.io.common import is_gcs_url diff 
--git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 2bb412cf6eab5..626df839363cb 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -15,7 +15,7 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.common import file_path_to_url import pandas.io.html diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py index 9f3ec274007d0..a1f9c6f6af51a 100644 --- a/pandas/tests/io/test_orc.py +++ b/pandas/tests/io/test_orc.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import read_orc -import pandas.util.testing as tm +import pandas._testing as tm pytest.importorskip("pyarrow", minversion="0.13.0") pytest.importorskip("pyarrow.orc") diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index fc3d55e110d69..f9bd0df4a9196 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -10,7 +10,7 @@ import pandas.util._test_decorators as td import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.parquet import ( FastParquetImpl, diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 3be966edef080..42f904b47a6ee 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -25,7 +25,7 @@ import pandas as pd from pandas import Index -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import Day, MonthEnd diff --git a/pandas/tests/io/test_spss.py b/pandas/tests/io/test_spss.py index ccf3167d49371..013f56f83c5ec 100644 --- a/pandas/tests/io/test_spss.py +++ b/pandas/tests/io/test_spss.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm pyreadstat = pytest.importorskip("pyreadstat") diff --git 
a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index d5c2b368a3c6a..45b3e839a08d1 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -41,7 +41,7 @@ to_datetime, to_timedelta, ) -import pandas.util.testing as tm +import pandas._testing as tm import pandas.io.sql as sql from pandas.io.sql import read_sql_query, read_sql_table diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index e8bc7f480fb1d..9e34abd43ad6b 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -12,8 +12,8 @@ from pandas.core.dtypes.common import is_categorical_dtype import pandas as pd +import pandas._testing as tm from pandas.core.frame import DataFrame, Series -import pandas.util.testing as tm from pandas.io.parsers import read_csv from pandas.io.stata import ( diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 86cb7fc57b225..9f43027836eb4 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm """ diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 116d924f5a596..8ee279f0e1f38 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -10,8 +10,8 @@ import pandas.util._test_decorators as td from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range +import pandas._testing as tm from pandas.tests.plotting.common import TestPlotBase, _check_plot_works -import pandas.util.testing as tm import pandas.plotting as plotting diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 71a186dc2f3b0..8fdf86ddabafe 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -10,7 +10,7 @@ from 
pandas.compat.numpy import np_datetime64_compat from pandas import Index, Period, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm from pandas.plotting import ( deregister_matplotlib_converters, diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 8456f095e5868..8f855fd0c6cff 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -9,12 +9,12 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Index, NaT, Series, isna +import pandas._testing as tm from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.resample import DatetimeIndex from pandas.tests.plotting.common import TestPlotBase -import pandas.util.testing as tm from pandas.tseries.offsets import DateOffset diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index c2a289b2772ba..bdf37ac7e83a4 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -17,9 +17,9 @@ import pandas as pd from pandas import DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range +import pandas._testing as tm from pandas.core.arrays import integer_array from pandas.tests.plotting.common import TestPlotBase, _check_plot_works -import pandas.util.testing as tm from pandas.io.formats.printing import pprint_thing import pandas.plotting as plotting diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py index bb1747710fe18..8fec4bb134cb4 100644 --- a/pandas/tests/plotting/test_groupby.py +++ b/pandas/tests/plotting/test_groupby.py @@ -8,8 +8,8 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Series +import pandas._testing as tm from pandas.tests.plotting.common import 
TestPlotBase -import pandas.util.testing as tm @td.skip_if_no_mpl diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 74d48c10ad9a0..50ebbc22f2739 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -9,8 +9,8 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Series +import pandas._testing as tm from pandas.tests.plotting.common import TestPlotBase, _check_plot_works -import pandas.util.testing as tm @td.skip_if_no_mpl diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index eadcc12d8428c..c8aa1f23ccf1f 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -10,8 +10,8 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Series +import pandas._testing as tm from pandas.tests.plotting.common import TestPlotBase, _check_plot_works -import pandas.util.testing as tm import pandas.plotting as plotting diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 61722d726b28b..8463f30bee8f0 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -14,8 +14,8 @@ import pandas as pd from pandas import DataFrame, Series, date_range +import pandas._testing as tm from pandas.tests.plotting.common import TestPlotBase, _check_plot_works -import pandas.util.testing as tm import pandas.plotting as plotting diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index d66472b1c2054..7400b049961d5 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -21,8 +21,8 @@ timedelta_range, to_timedelta, ) +import pandas._testing as tm from pandas.core import nanops -import pandas.util.testing as tm def get_objs(): diff --git a/pandas/tests/reductions/test_stat_reductions.py 
b/pandas/tests/reductions/test_stat_reductions.py index 432811b5a8264..59dbcb9ab9fa0 100644 --- a/pandas/tests/reductions/test_stat_reductions.py +++ b/pandas/tests/reductions/test_stat_reductions.py @@ -10,8 +10,8 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray -import pandas.util.testing as tm class TestDatetimeLikeStatReductions: diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 02203f476af8e..476643bb3e497 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -5,12 +5,12 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.groupby.groupby import DataError from pandas.core.groupby.grouper import Grouper from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import PeriodIndex, period_range from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range -import pandas.util.testing as tm # a fixture value can be overridden by the test parameter value. 
Note that the # value of the fixture can be overridden this way even if the test doesn't use diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index bcd7081d5b1a5..4860329718f54 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -10,11 +10,11 @@ import pandas as pd from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna +import pandas._testing as tm from pandas.core.groupby.grouper import Grouper from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, period_range from pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges -import pandas.util.testing as tm import pandas.tseries.offsets as offsets from pandas.tseries.offsets import BDay, Minute diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index 0ed0bf18a82ee..40226ab2fe9b0 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -10,11 +10,11 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp +import pandas._testing as tm from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range from pandas.core.resample import _get_period_range_edges -import pandas.util.testing as tm import pandas.tseries.offsets as offsets diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 7852afcdbfea9..bc2d6df3755d5 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -6,8 +6,8 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.indexes.datetimes import date_range -import pandas.util.testing as tm dti = date_range(start=datetime(2005, 1, 1), 
end=datetime(2005, 1, 10), freq="Min") diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 7efc6b0d466b9..95a7654a618b2 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -4,8 +4,8 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp +import pandas._testing as tm from pandas.core.indexes.datetimes import date_range -import pandas.util.testing as tm test_frame = DataFrame( {"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}, @@ -18,7 +18,7 @@ def test_tab_complete_ipython6_warning(ip): code = dedent( """\ - import pandas.util.testing as tm + import pandas._testing as tm s = tm.makeTimeSeries() rs = s.resample("D") """ diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index 4c27d48cff6fd..3aa7765954634 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -6,9 +6,9 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.groupby.grouper import Grouper from pandas.core.indexes.datetimes import date_range -import pandas.util.testing as tm test_series = Series(np.random.randn(1000), index=date_range("1/1/2000", periods=1000)) diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py index 7a6ebf826ca4d..d1bcdc55cb509 100644 --- a/pandas/tests/resample/test_timedelta.py +++ b/pandas/tests/resample/test_timedelta.py @@ -4,8 +4,8 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.indexes.timedeltas import timedelta_range -import pandas.util.testing as tm def test_asfreq_bug(): diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 091bdd8de2995..a660acb143433 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ 
b/pandas/tests/reshape/merge/test_join.py @@ -6,8 +6,8 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, concat, merge +import pandas._testing as tm from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data -import pandas.util.testing as tm a_ = np.array diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index e191bf67c51ca..8e0c4766056d3 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -25,10 +25,10 @@ TimedeltaIndex, UInt64Index, ) +import pandas._testing as tm from pandas.api.types import CategoricalDtype as CDT from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import MergeError, merge -import pandas.util.testing as tm N = 50 NGROUPS = 8 diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index b2e764c5463fa..9ec649cd6dde0 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -6,8 +6,8 @@ import pandas as pd from pandas import Timedelta, merge_asof, read_csv, to_datetime +import pandas._testing as tm from pandas.core.reshape.merge import MergeError -import pandas.util.testing as tm class TestAsOfMerge: diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py index 4e0f570567c07..691f2549c0ece 100644 --- a/pandas/tests/reshape/merge/test_merge_index_as_string.py +++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py index 6d6429fb4e6b5..e0063925a03e1 100644 --- a/pandas/tests/reshape/merge/test_merge_ordered.py +++ 
b/pandas/tests/reshape/merge/test_merge_ordered.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, merge_ordered -import pandas.util.testing as tm +import pandas._testing as tm class TestMergeOrdered: diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index 8d5c18b71e7c2..1f78c1900d237 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -4,9 +4,9 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series +import pandas._testing as tm from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge -import pandas.util.testing as tm @pytest.fixture diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 05193c00f0649..6a26dc474afc8 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -27,9 +27,9 @@ isna, read_csv, ) +import pandas._testing as tm from pandas.core.construction import create_series_with_explicit_dtype from pandas.tests.extension.decimal import to_decimal -import pandas.util.testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py index 611c3272c123f..e52636d54ebe8 100644 --- a/pandas/tests/reshape/test_cut.py +++ b/pandas/tests/reshape/test_cut.py @@ -19,9 +19,9 @@ timedelta_range, to_datetime, ) +import pandas._testing as tm from pandas.api.types import CategoricalDtype as CDT import pandas.core.reshape.tile as tmod -import pandas.util.testing as tm def test_simple(): diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 2c03c48209fea..814325844cb4c 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, lreshape, melt, wide_to_long -import pandas.util.testing as tm +import pandas._testing as tm class TestMelt: 
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 43da011ed7100..054af87b42411 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -15,9 +15,9 @@ concat, date_range, ) +import pandas._testing as tm from pandas.api.types import CategoricalDtype as CDT from pandas.core.reshape.pivot import crosstab, pivot_table -import pandas.util.testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py index eca9b11bd4364..c5ca05056a306 100644 --- a/pandas/tests/reshape/test_qcut.py +++ b/pandas/tests/reshape/test_qcut.py @@ -18,9 +18,9 @@ qcut, timedelta_range, ) +import pandas._testing as tm from pandas.api.types import CategoricalDtype as CDT from pandas.core.algorithms import quantile -import pandas.util.testing as tm from pandas.tseries.offsets import Day, Nano diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index b695b05c7c7db..003c74566be71 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -7,8 +7,8 @@ import pandas as pd from pandas import Categorical, DataFrame, Index, Series, get_dummies +import pandas._testing as tm from pandas.core.arrays.sparse import SparseArray, SparseDtype -import pandas.util.testing as tm class TestGetDummies: diff --git a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py index 9b56c4df6d7de..a503173bd74b1 100644 --- a/pandas/tests/reshape/test_union_categoricals.py +++ b/pandas/tests/reshape/test_union_categoricals.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Categorical, CategoricalIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestUnionCategoricals: diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py index 60c6d7ec3017b..cd518dda4edbf 100644 --- 
a/pandas/tests/reshape/test_util.py +++ b/pandas/tests/reshape/test_util.py @@ -2,8 +2,8 @@ import pytest from pandas import Index, date_range +import pandas._testing as tm from pandas.core.reshape.util import cartesian_product -import pandas.util.testing as tm class TestCartesianProduct: diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 73371c48f9370..6af9c9884589c 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -16,7 +16,7 @@ import pandas as pd from pandas import NaT, Period, Timedelta, Timestamp, offsets -import pandas.util.testing as tm +import pandas._testing as tm class TestPeriodConstruction: diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index 82d5b097733f1..384bf171738bc 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -6,7 +6,7 @@ from pandas.core.dtypes.common import is_scalar import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def test_singleton(): diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index e709db980b721..b1594dee9bc34 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -21,9 +21,9 @@ Timestamp, isna, ) +import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.ops import roperator -import pandas.util.testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index fed613b910c55..3764d9b7548fc 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -9,8 +9,8 @@ import pandas as pd from pandas import NaT, Timedelta, Timestamp, offsets +import pandas._testing as tm from pandas.core import ops -import pandas.util.testing as tm 
class TestTimedeltaAdditionSubtraction: diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index b988a72fd2684..e1d965bbb14e9 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import Series, Timedelta, TimedeltaIndex, timedelta_range, to_timedelta -import pandas.util.testing as tm +import pandas._testing as tm class TestTimedeltaArithmetic: diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 25609cb852ed4..18a8d4b4ad708 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -19,7 +19,7 @@ import pandas.util._test_decorators as td from pandas import NaT, Period, Timedelta, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries import offsets diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index db63e0bf9cd30..65066fd0099ba 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -10,7 +10,7 @@ import pandas.util._test_decorators as td from pandas import NaT, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.frequencies import to_offset diff --git a/pandas/tests/series/conftest.py b/pandas/tests/series/conftest.py index 18d3c87a01f87..ff0b0c71f88b0 100644 --- a/pandas/tests/series/conftest.py +++ b/pandas/tests/series/conftest.py @@ -1,6 +1,6 @@ import pytest -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py index c47b99fa38989..47f40e24e1637 100644 --- a/pandas/tests/series/indexing/test_alter_index.py +++ 
b/pandas/tests/series/indexing/test_alter_index.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Categorical, Series, date_range, isna -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 925d657d7dd04..e3acc0f5d6457 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import Index, Series, Timestamp, date_range, isna +import pandas._testing as tm from pandas.core.indexing import IndexingError -import pandas.util.testing as tm from pandas.tseries.offsets import BDay diff --git a/pandas/tests/series/indexing/test_callable.py b/pandas/tests/series/indexing/test_callable.py index 2d879eed967e5..fe575cf146641 100644 --- a/pandas/tests/series/indexing/test_callable.py +++ b/pandas/tests/series/indexing/test_callable.py @@ -1,5 +1,5 @@ import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def test_getitem_callable(): diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 83c1c0ff16f4c..15ff5f6b343d1 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm """ diff --git a/pandas/tests/series/indexing/test_iloc.py b/pandas/tests/series/indexing/test_iloc.py index eef4d89af3832..f276eb5b0b23d 100644 --- a/pandas/tests/series/indexing/test_iloc.py +++ b/pandas/tests/series/indexing/test_iloc.py @@ -1,7 +1,7 @@ import numpy as np from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm def test_iloc(): diff --git a/pandas/tests/series/indexing/test_indexing.py 
b/pandas/tests/series/indexing/test_indexing.py index 5bebd480ce8d4..e62963bcfb5f9 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import Categorical, DataFrame, MultiIndex, Series, Timedelta, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import BDay diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py index e6b5b5df2b000..7d6b6c78cc492 100644 --- a/pandas/tests/series/indexing/test_loc.py +++ b/pandas/tests/series/indexing/test_loc.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("val,expected", [(2 ** 63 - 1, 3), (2 ** 63, 4)]) diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py index a641b47f2e690..ce0d04ff99077 100644 --- a/pandas/tests/series/indexing/test_numeric.py +++ b/pandas/tests/series/indexing/test_numeric.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, Index, Series -import pandas.util.testing as tm +import pandas._testing as tm def test_get(): diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py index ec357786f18fb..dc0fca4bba067 100644 --- a/pandas/tests/series/methods/test_append.py +++ b/pandas/tests/series/methods/test_append.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesAppend: diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py index 9dd3045ad86d9..1fc98ded0d3d2 100644 --- a/pandas/tests/series/methods/test_argsort.py +++ b/pandas/tests/series/methods/test_argsort.py @@ -2,7 +2,7 @@ import pytest from pandas import 
Series, Timestamp, isna -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesArgsort: diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py index 8bc9e9c38d83a..b121efd202744 100644 --- a/pandas/tests/series/methods/test_asof.py +++ b/pandas/tests/series/methods/test_asof.py @@ -2,7 +2,7 @@ import pytest from pandas import Series, Timestamp, date_range, isna, notna, offsets -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesAsof: diff --git a/pandas/tests/series/methods/test_clip.py b/pandas/tests/series/methods/test_clip.py index c2bec2744583a..37764d3b82c2d 100644 --- a/pandas/tests/series/methods/test_clip.py +++ b/pandas/tests/series/methods/test_clip.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Series, Timestamp, isna, notna -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesClip: diff --git a/pandas/tests/series/methods/test_count.py b/pandas/tests/series/methods/test_count.py index 9cf776c0d9f1a..1ca48eeb7c441 100644 --- a/pandas/tests/series/methods/test_count.py +++ b/pandas/tests/series/methods/test_count.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import Categorical, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesCount: diff --git a/pandas/tests/series/methods/test_cov_corr.py b/pandas/tests/series/methods/test_cov_corr.py index f7dae87018419..1f6033d435323 100644 --- a/pandas/tests/series/methods/test_cov_corr.py +++ b/pandas/tests/series/methods/test_cov_corr.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Series, isna -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesCov: diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py index ed412e7da3d43..b147a04b11090 100644 --- a/pandas/tests/series/methods/test_describe.py +++ b/pandas/tests/series/methods/test_describe.py 
@@ -1,7 +1,7 @@ import numpy as np from pandas import Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesDescribe: diff --git a/pandas/tests/series/methods/test_diff.py b/pandas/tests/series/methods/test_diff.py index 9cb4ec827a271..033f75e95f11b 100644 --- a/pandas/tests/series/methods/test_diff.py +++ b/pandas/tests/series/methods/test_diff.py @@ -2,7 +2,7 @@ import pytest from pandas import Series, TimedeltaIndex, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesDiff: diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index 2c5dcd2c45171..2d052505d5ecc 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -2,7 +2,7 @@ import pytest from pandas import Categorical, Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/series/methods/test_duplicated.py b/pandas/tests/series/methods/test_duplicated.py index 36b3b559477a6..5cc297913e851 100644 --- a/pandas/tests/series/methods/test_duplicated.py +++ b/pandas/tests/series/methods/test_duplicated.py @@ -2,7 +2,7 @@ import pytest from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py index e79d3c0556cf1..979199e1efc62 100644 --- a/pandas/tests/series/methods/test_explode.py +++ b/pandas/tests/series/methods/test_explode.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def test_basic(): diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py index 69b2f896aec52..ca93e989ba6b5 100644 --- a/pandas/tests/series/methods/test_isin.py +++ 
b/pandas/tests/series/methods/test_isin.py @@ -3,7 +3,7 @@ import pandas as pd from pandas import Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesIsIn: diff --git a/pandas/tests/series/methods/test_nlargest.py b/pandas/tests/series/methods/test_nlargest.py index 423b4ad78a78a..a029965c7394f 100644 --- a/pandas/tests/series/methods/test_nlargest.py +++ b/pandas/tests/series/methods/test_nlargest.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm main_dtypes = [ "datetime", diff --git a/pandas/tests/series/methods/test_pct_change.py b/pandas/tests/series/methods/test_pct_change.py index aa01543132841..1efb57894f986 100644 --- a/pandas/tests/series/methods/test_pct_change.py +++ b/pandas/tests/series/methods/test_pct_change.py @@ -2,7 +2,7 @@ import pytest from pandas import Series, date_range -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesPctChange: diff --git a/pandas/tests/series/methods/test_quantile.py b/pandas/tests/series/methods/test_quantile.py index 4eb275d63e878..79f50afca658f 100644 --- a/pandas/tests/series/methods/test_quantile.py +++ b/pandas/tests/series/methods/test_quantile.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import Index, Series +import pandas._testing as tm from pandas.core.indexes.datetimes import Timestamp -import pandas.util.testing as tm class TestSeriesQuantile: diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py index 793e8b7da4965..1d538e06ccc4e 100644 --- a/pandas/tests/series/methods/test_rank.py +++ b/pandas/tests/series/methods/test_rank.py @@ -8,8 +8,8 @@ import pandas.util._test_decorators as td from pandas import NaT, Series, Timestamp, date_range +import pandas._testing as tm from pandas.api.types import CategoricalDtype -import pandas.util.testing as tm class TestSeriesRank: diff --git 
a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 4125b5816422a..b20baa2836363 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesReplace: diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py index 1776468ef5a83..7f0711a0f30d7 100644 --- a/pandas/tests/series/methods/test_round.py +++ b/pandas/tests/series/methods/test_round.py @@ -2,7 +2,7 @@ import pytest from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesRound: diff --git a/pandas/tests/series/methods/test_searchsorted.py b/pandas/tests/series/methods/test_searchsorted.py index 0d6e9635579f0..fd6c6f74a9136 100644 --- a/pandas/tests/series/methods/test_searchsorted.py +++ b/pandas/tests/series/methods/test_searchsorted.py @@ -1,8 +1,8 @@ import numpy as np from pandas import Series, Timestamp, date_range +import pandas._testing as tm from pandas.api.types import is_scalar -import pandas.util.testing as tm class TestSeriesSearchSorted: diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py index 2cf847c928862..8256e2f33b936 100644 --- a/pandas/tests/series/methods/test_shift.py +++ b/pandas/tests/series/methods/test_shift.py @@ -13,7 +13,7 @@ date_range, offsets, ) -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import BDay diff --git a/pandas/tests/series/methods/test_sort_index.py b/pandas/tests/series/methods/test_sort_index.py index a9b73c2344681..1f1de7f3e420a 100644 --- a/pandas/tests/series/methods/test_sort_index.py +++ b/pandas/tests/series/methods/test_sort_index.py @@ -4,7 +4,7 @@ import pytest from pandas import IntervalIndex, MultiIndex, Series -import pandas.util.testing as tm +import 
pandas._testing as tm class TestSeriesSortIndex: diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py index 2cea6f061de76..1ced9cab0791e 100644 --- a/pandas/tests/series/methods/test_sort_values.py +++ b/pandas/tests/series/methods/test_sort_values.py @@ -2,7 +2,7 @@ import pytest from pandas import Categorical, DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesSortValues: diff --git a/pandas/tests/series/methods/test_to_dict.py b/pandas/tests/series/methods/test_to_dict.py index 0f1359f99e594..2fbf3e8d39cf3 100644 --- a/pandas/tests/series/methods/test_to_dict.py +++ b/pandas/tests/series/methods/test_to_dict.py @@ -3,7 +3,7 @@ import pytest from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesToDict: diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py index b2bf5e854fbcc..d4e2890ed8bf0 100644 --- a/pandas/tests/series/methods/test_truncate.py +++ b/pandas/tests/series/methods/test_truncate.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import BDay diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py index 15d895f44c7b2..fdb35befeb0c2 100644 --- a/pandas/tests/series/methods/test_value_counts.py +++ b/pandas/tests/series/methods/test_value_counts.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import Categorical, CategoricalIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesValueCounts: diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 9e1bae8469138..62ff0a075d2ca 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -4,7 +4,7 @@ import pytest from pandas import 
DataFrame, Index, MultiIndex, RangeIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesAlterAxes: diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index a88043c7777c4..c29bd3ea0cb7d 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesAnalytics: diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 89a60d371770a..32587c6afee73 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -19,8 +19,8 @@ period_range, timedelta_range, ) +import pandas._testing as tm from pandas.core.arrays import PeriodArray -import pandas.util.testing as tm import pandas.io.formats.printing as printing diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 37bedc1ab7508..a997e2e0a3ab9 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -6,9 +6,9 @@ import pandas as pd from pandas import DataFrame, Index, Series, isna +import pandas._testing as tm from pandas.conftest import _get_cython_table_params from pandas.core.base import SpecificationError -import pandas.util.testing as tm class TestSeriesApply: diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 68d6169fa4f34..edb1d2d98fa2e 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import Series +import pandas._testing as tm from pandas.core.indexes.period import IncompatibleFrequency -import pandas.util.testing as tm def _permute(obj): diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index 238a413af7a31..239353d3955b4 100644 --- 
a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesCombine: diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 1c3f1404215d3..a36b1de2ba04e 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -26,8 +26,8 @@ period_range, timedelta_range, ) +import pandas._testing as tm from pandas.core.arrays import period_array -import pandas.util.testing as tm class TestSeriesConstructors: diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py index f72206e42403c..885b5bf0476f2 100644 --- a/pandas/tests/series/test_cumulative.py +++ b/pandas/tests/series/test_cumulative.py @@ -11,7 +11,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm def _check_accum_op(name, series, check_dtype=True): diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index aa56131f05570..b8be4ea137e3d 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -24,9 +24,9 @@ period_range, timedelta_range, ) +import pandas._testing as tm from pandas.core.arrays import PeriodArray import pandas.core.common as com -import pandas.util.testing as tm class TestSeriesDatetimeValues: diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 69e34a4d97006..4bf2f1bd82eff 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -20,7 +20,7 @@ Timestamp, date_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesDtypes: diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py index 
57d919ccb89ec..3513db6177951 100644 --- a/pandas/tests/series/test_duplicates.py +++ b/pandas/tests/series/test_duplicates.py @@ -2,8 +2,8 @@ import pytest from pandas import Categorical, Series +import pandas._testing as tm from pandas.core.construction import create_series_with_explicit_dtype -import pandas.util.testing as tm def test_nunique(): diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index efcb500a0b79f..4c817ed2e2d59 100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -5,8 +5,8 @@ import pandas as pd from pandas import NaT, Series, Timestamp +import pandas._testing as tm from pandas.core.internals.blocks import IntBlock -import pandas.util.testing as tm class TestSeriesInternals: diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index f53081ac53b01..510c11a51ca38 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm from pandas.io.common import get_handle diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index c49cd6930781e..128aea84fc967 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -21,7 +21,7 @@ date_range, isna, ) -import pandas.util.testing as tm +import pandas._testing as tm def _simple_ts(start, end, freq="D"): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 06fe64d69fb6b..bdd9f92d92d3f 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -6,9 +6,9 @@ import pandas as pd from pandas import Categorical, DataFrame, Index, Series, bdate_range, date_range, isna +import pandas._testing as tm from pandas.core import ops import pandas.core.nanops as nanops -import pandas.util.testing as tm class 
TestSeriesLogicalOps: diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 4aeb211170d8f..03fee389542e3 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -3,8 +3,8 @@ import pandas as pd from pandas import DataFrame, Period, Series, period_range +import pandas._testing as tm from pandas.core.arrays import PeriodArray -import pandas.util.testing as tm class TestSeriesPeriod: diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 008ae50e4cde5..64a8c4569406e 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -15,7 +15,7 @@ period_range, timedelta_range, ) -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesRepr: diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py index 5e2d23a70e5be..73247bbf8b3d6 100644 --- a/pandas/tests/series/test_subclass.py +++ b/pandas/tests/series/test_subclass.py @@ -1,4 +1,4 @@ -import pandas.util.testing as tm +import pandas._testing as tm class TestSeriesSubclassing: diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index b317a2ee6f018..9f6cf155c7c51 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -21,7 +21,7 @@ timedelta_range, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.offsets import BDay, BMonthEnd diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index 5e255e7cd5dcd..a363f927d10a9 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -11,8 +11,8 @@ from pandas._libs.tslibs import conversion, timezones from pandas import DatetimeIndex, Index, NaT, Series, Timestamp +import pandas._testing as tm from pandas.core.indexes.datetimes import date_range -import pandas.util.testing as tm class 
TestSeriesTimezones: diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py index 120eaeaf785b0..f3c3dd876d87a 100644 --- a/pandas/tests/series/test_ufunc.py +++ b/pandas/tests/series/test_ufunc.py @@ -5,7 +5,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm UNARY_UFUNCS = [np.positive, np.floor, np.exp] BINARY_UFUNCS = [np.add, np.logaddexp] # dunder op diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 82f647c9385b2..2b46f86d49c5e 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -30,11 +30,11 @@ Timestamp, compat, ) +import pandas._testing as tm from pandas.conftest import BYTES_DTYPES, STRING_DTYPES import pandas.core.algorithms as algos from pandas.core.arrays import DatetimeArray import pandas.core.common as com -import pandas.util.testing as tm class TestFactorize: diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 12d834131f71b..ee006233c4c1b 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -9,7 +9,7 @@ import pytest from pandas import DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm def import_module(name): diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 9808c3d78b436..fadab5d821470 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -5,9 +5,9 @@ from numpy.random import randn import pytest +import pandas._testing as tm from pandas.core.api import DataFrame from pandas.core.computation import expressions as expr -import pandas.util.testing as tm _frame = DataFrame(randn(10000, 4), columns=list("ABCD"), dtype="float64") _frame2 = DataFrame(randn(100, 4), columns=list("ABCD"), dtype="float64") diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index 8940a82b33777..129dc275c4d5a 100644 --- a/pandas/tests/test_join.py +++ 
b/pandas/tests/test_join.py @@ -4,7 +4,7 @@ from pandas._libs import join as _join from pandas import Categorical, DataFrame, Index, merge -import pandas.util.testing as tm +import pandas._testing as tm class TestIndexer: diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 77841f0bb9f0d..f839aa198d03f 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -4,7 +4,7 @@ from pandas._libs import lib, writers as libwriters from pandas import Index -import pandas.util.testing as tm +import pandas._testing as tm class TestMisc: diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 39c122addd8b1..2e8fe0c266f6b 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna -import pandas.util.testing as tm +import pandas._testing as tm AGG_FUNCTIONS = [ "sum", diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index c207c803510ca..b2bccbeb82c27 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -11,9 +11,9 @@ import pandas as pd from pandas import Series, isna +import pandas._testing as tm from pandas.core.arrays import DatetimeArray import pandas.core.nanops as nanops -import pandas.util.testing as tm use_bn = nanops._USE_BOTTLENECK has_c16 = hasattr(np, "complex128") diff --git a/pandas/tests/test_optional_dependency.py b/pandas/tests/test_optional_dependency.py index cd154ed5fe570..ce527214e55e7 100644 --- a/pandas/tests/test_optional_dependency.py +++ b/pandas/tests/test_optional_dependency.py @@ -5,7 +5,7 @@ from pandas.compat._optional import VERSIONS, import_optional_dependency -import pandas.util.testing as tm +import pandas._testing as tm def test_import_optional(): diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py index 6b40ff8b3fa1e..08a5581886522 100644 --- 
a/pandas/tests/test_register_accessor.py +++ b/pandas/tests/test_register_accessor.py @@ -3,7 +3,7 @@ import pytest import pandas as pd -import pandas.util.testing as tm +import pandas._testing as tm @contextlib.contextmanager diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 90cd9cc3e006d..98297474243e4 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -6,6 +6,7 @@ import pytest from pandas import DataFrame, MultiIndex, Series, array, concat, merge +import pandas._testing as tm from pandas.core.algorithms import safe_sort import pandas.core.common as com from pandas.core.sorting import ( @@ -15,7 +16,6 @@ lexsort_indexer, nargsort, ) -import pandas.util.testing as tm class TestSorting: diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index ae7ab6addc3fb..7f3375070d7d9 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -8,8 +8,8 @@ from pandas._libs import lib from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna +import pandas._testing as tm import pandas.core.strings as strings -import pandas.util.testing as tm def assert_series_or_index_equal(left, right): diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index d2a9e1dc94bb5..465296a6f9e51 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -6,8 +6,8 @@ from pandas._libs.tslib import iNaT +import pandas._testing as tm import pandas.core.algorithms as algos -import pandas.util.testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py index 082277796e602..2fd39d5a7b703 100644 --- a/pandas/tests/tools/test_numeric.py +++ b/pandas/tests/tools/test_numeric.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, to_numeric -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture(params=[None, "ignore", "raise", 
"coerce"]) diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index 250c37cdadbe4..c4660417599a8 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -8,8 +8,8 @@ from pandas.compat import is_platform_windows from pandas import DatetimeIndex, Index, Series, Timestamp, date_range, period_range +import pandas._testing as tm from pandas.core.tools.datetimes import to_datetime -import pandas.util.testing as tm import pandas.tseries.frequencies as frequencies import pandas.tseries.offsets as offsets diff --git a/pandas/tests/tseries/holiday/test_calendar.py b/pandas/tests/tseries/holiday/test_calendar.py index c122f92ed228c..5b4a7c74b1af1 100644 --- a/pandas/tests/tseries/holiday/test_calendar.py +++ b/pandas/tests/tseries/holiday/test_calendar.py @@ -3,7 +3,7 @@ import pytest from pandas import DatetimeIndex, offsets, to_datetime -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.holiday import ( AbstractHolidayCalendar, diff --git a/pandas/tests/tseries/holiday/test_holiday.py b/pandas/tests/tseries/holiday/test_holiday.py index 7748b965f8962..a2c146dbd65e8 100644 --- a/pandas/tests/tseries/holiday/test_holiday.py +++ b/pandas/tests/tseries/holiday/test_holiday.py @@ -3,7 +3,7 @@ import pytest from pytz import utc -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.holiday import ( MO, diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index 9ab722b866c76..5686119593e18 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -9,7 +9,7 @@ from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG from pandas import Timestamp -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries.frequencies import get_offset from 
pandas.tseries.offsets import FY5253, FY5253Quarter diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index fcabc0bee85b6..e98699f6b4ec9 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -22,9 +22,9 @@ from pandas.compat.numpy import np_datetime64_compat from pandas.errors import PerformanceWarning +import pandas._testing as tm from pandas.core.indexes.datetimes import DatetimeIndex, _to_M8, date_range from pandas.core.series import Series -import pandas.util.testing as tm from pandas.io.pickle import read_pickle from pandas.tseries.frequencies import _get_offset, _offset_map diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index 2914d4ddf0da0..297e5c3178379 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -8,7 +8,7 @@ import pytest from pandas import Timedelta, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm from pandas.tseries import offsets from pandas.tseries.offsets import Hour, Micro, Milli, Minute, Nano, Second diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py index 5cf2165993cd7..a40fcd725d604 100644 --- a/pandas/tests/tslibs/test_array_to_datetime.py +++ b/pandas/tests/tslibs/test_array_to_datetime.py @@ -9,7 +9,7 @@ from pandas.compat.numpy import np_array_datetime64_compat from pandas import Timestamp -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index 6c30e2b6c7a1c..2beeae85de683 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -8,7 +8,7 @@ from pandas._libs.tslibs import conversion, timezones, tzconversion from pandas import Timestamp, date_range -import 
pandas.util.testing as tm +import pandas._testing as tm def _compare_utc_to_local(tz_didx): diff --git a/pandas/tests/tslibs/test_fields.py b/pandas/tests/tslibs/test_fields.py index cd729956a027c..943f4207df543 100644 --- a/pandas/tests/tslibs/test_fields.py +++ b/pandas/tests/tslibs/test_fields.py @@ -2,7 +2,7 @@ from pandas._libs.tslibs import fields -import pandas.util.testing as tm +import pandas._testing as tm def test_fields_readonly(): diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index 0bc30347b3fa9..36f7ada7326bf 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -11,7 +11,7 @@ from pandas._libs.tslibs.parsing import parse_time_string import pandas.util._test_decorators as td -import pandas.util.testing as tm +import pandas._testing as tm def test_parse_time_string(): diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py index f430e2893ca33..ffa49f6ba442b 100644 --- a/pandas/tests/util/test_assert_almost_equal.py +++ b/pandas/tests/util/test_assert_almost_equal.py @@ -2,7 +2,7 @@ import pytest from pandas import DataFrame, Index, Series, Timestamp -import pandas.util.testing as tm +import pandas._testing as tm def _assert_almost_equal_both(a, b, **kwargs): diff --git a/pandas/tests/util/test_assert_categorical_equal.py b/pandas/tests/util/test_assert_categorical_equal.py index 44400498ddc64..6ae16cee89ee3 100644 --- a/pandas/tests/util/test_assert_categorical_equal.py +++ b/pandas/tests/util/test_assert_categorical_equal.py @@ -1,7 +1,7 @@ import pytest from pandas import Categorical -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/util/test_assert_extension_array_equal.py b/pandas/tests/util/test_assert_extension_array_equal.py index cecf9273004d7..320d5a4de9d08 100644 --- a/pandas/tests/util/test_assert_extension_array_equal.py +++ 
b/pandas/tests/util/test_assert_extension_array_equal.py @@ -1,8 +1,8 @@ import numpy as np import pytest +import pandas._testing as tm from pandas.core.arrays.sparse import SparseArray -import pandas.util.testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index b46a8460a28b2..0aa450f5f96d1 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -1,7 +1,7 @@ import pytest from pandas import DataFrame -import pandas.util.testing as tm +import pandas._testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py index 8c3f242f0c96b..9257e52fe34ab 100644 --- a/pandas/tests/util/test_assert_index_equal.py +++ b/pandas/tests/util/test_assert_index_equal.py @@ -2,7 +2,7 @@ import pytest from pandas import Categorical, Index, MultiIndex, NaT -import pandas.util.testing as tm +import pandas._testing as tm def test_index_equal_levels_mismatch(): diff --git a/pandas/tests/util/test_assert_interval_array_equal.py b/pandas/tests/util/test_assert_interval_array_equal.py index b264b484a04ab..96f2973a1528c 100644 --- a/pandas/tests/util/test_assert_interval_array_equal.py +++ b/pandas/tests/util/test_assert_interval_array_equal.py @@ -1,7 +1,7 @@ import pytest from pandas import interval_range -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize( diff --git a/pandas/tests/util/test_assert_numpy_array_equal.py b/pandas/tests/util/test_assert_numpy_array_equal.py index 53bcedf3a16f1..a6b32610f592d 100644 --- a/pandas/tests/util/test_assert_numpy_array_equal.py +++ b/pandas/tests/util/test_assert_numpy_array_equal.py @@ -2,7 +2,7 @@ import pytest from pandas import Timestamp -import pandas.util.testing as tm +import pandas._testing as tm def test_assert_numpy_array_equal_shape_mismatch(): diff --git 
a/pandas/tests/util/test_assert_produces_warning.py b/pandas/tests/util/test_assert_produces_warning.py index c681817896903..87765c909938d 100644 --- a/pandas/tests/util/test_assert_produces_warning.py +++ b/pandas/tests/util/test_assert_produces_warning.py @@ -2,7 +2,7 @@ import pytest -import pandas.util.testing as tm +import pandas._testing as tm def f(): diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 0a6047c4662ba..eaf0824f52927 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -1,7 +1,7 @@ import pytest from pandas import Categorical, DataFrame, Series -import pandas.util.testing as tm +import pandas._testing as tm def _assert_series_equal_both(a, b, **kwargs): diff --git a/pandas/tests/util/test_deprecate.py b/pandas/tests/util/test_deprecate.py index 8fbc8037ed7c5..ee4f7e3f34f2e 100644 --- a/pandas/tests/util/test_deprecate.py +++ b/pandas/tests/util/test_deprecate.py @@ -4,7 +4,7 @@ from pandas.util._decorators import deprecate -import pandas.util.testing as tm +import pandas._testing as tm def new_func(): diff --git a/pandas/tests/util/test_deprecate_kwarg.py b/pandas/tests/util/test_deprecate_kwarg.py index c17c48197ccf7..b165e9fba0e4f 100644 --- a/pandas/tests/util/test_deprecate_kwarg.py +++ b/pandas/tests/util/test_deprecate_kwarg.py @@ -2,7 +2,7 @@ from pandas.util._decorators import deprecate_kwarg -import pandas.util.testing as tm +import pandas._testing as tm @deprecate_kwarg("old", "new") diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index ee9c4ed12bd92..c915edad4bb8e 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -5,9 +5,9 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series +import pandas._testing as tm from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples from pandas.util import hash_array, 
hash_pandas_object -import pandas.util.testing as tm @pytest.fixture( diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 60124c8e943ad..6a19adef728e4 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -4,7 +4,7 @@ import pandas.compat as compat -import pandas.util.testing as tm +import pandas._testing as tm def test_rands(): diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py index 515e918b4b671..6aeada3152dbb 100644 --- a/pandas/tests/window/common.py +++ b/pandas/tests/window/common.py @@ -4,7 +4,7 @@ from numpy.random import randn from pandas import DataFrame, Series, bdate_range, notna -import pandas.util.testing as tm +import pandas._testing as tm N, K = 100, 10 diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index 2519fe82f00a6..599761259e041 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -4,6 +4,7 @@ import pandas as pd from pandas import DataFrame, Series, concat +import pandas._testing as tm from pandas.tests.window.common import ( Base, ConsistencyBase, @@ -11,7 +12,6 @@ check_binary_ew_min_periods, ew_func, ) -import pandas.util.testing as tm @pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py index 507fd2e2fb3ba..4596552d8f255 100644 --- a/pandas/tests/window/moments/test_moments_expanding.py +++ b/pandas/tests/window/moments/test_moments_expanding.py @@ -5,8 +5,8 @@ import pytest from pandas import DataFrame, Index, MultiIndex, Series, isna, notna +import pandas._testing as tm from pandas.tests.window.common import ConsistencyBase -import pandas.util.testing as tm class TestExpandingMomentsConsistency(ConsistencyBase): diff --git a/pandas/tests/window/moments/test_moments_rolling.py 
b/pandas/tests/window/moments/test_moments_rolling.py index c110ed172ecb9..9495d2bc7d51d 100644 --- a/pandas/tests/window/moments/test_moments_rolling.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -10,9 +10,9 @@ import pandas as pd from pandas import DataFrame, Index, Series, isna, notna +import pandas._testing as tm from pandas.core.window.common import _flex_binary_moment from pandas.tests.window.common import Base, ConsistencyBase -import pandas.util.testing as tm import pandas.tseries.offsets as offsets diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 5085576cc96f0..5e70e13209de5 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -7,9 +7,9 @@ import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, concat +import pandas._testing as tm from pandas.core.base import SpecificationError from pandas.tests.window.common import Base -import pandas.util.testing as tm class TestApi(Base): diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py index 4b56cbd48c388..7132e64c1191c 100644 --- a/pandas/tests/window/test_apply.py +++ b/pandas/tests/window/test_apply.py @@ -4,7 +4,7 @@ import pandas.util._test_decorators as td from pandas import DataFrame, Series, Timestamp, date_range -import pandas.util.testing as tm +import pandas._testing as tm @pytest.mark.parametrize("bad_raw", [None, 1, 0]) diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 6a3f2c19babdc..606520c6d68ca 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -2,9 +2,9 @@ import pytest from pandas import DataFrame, Series +import pandas._testing as tm from pandas.api.indexers import BaseIndexer from pandas.core.window.indexers import ExpandingIndexer -import pandas.util.testing as tm def test_bad_get_window_bounds_signature(): diff --git a/pandas/tests/window/test_dtypes.py 
b/pandas/tests/window/test_dtypes.py index 9d023034c570a..b1c9b66ab09d3 100644 --- a/pandas/tests/window/test_dtypes.py +++ b/pandas/tests/window/test_dtypes.py @@ -4,8 +4,8 @@ import pytest from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.base import DataError -import pandas.util.testing as tm # gh-12373 : rolling functions error on float32 data # make sure rolling functions works for different dtypes diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index 098acdff93ac6..fc4bd50f25c73 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -5,9 +5,9 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.window import Expanding from pandas.tests.window.common import Base -import pandas.util.testing as tm class TestExpanding(Base): diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 189942bc07d2a..355ef3a90d424 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -3,8 +3,8 @@ import pandas as pd from pandas import DataFrame, Series +import pandas._testing as tm from pandas.core.groupby.groupby import get_groupby -import pandas.util.testing as tm class TestGrouperGrouping: diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 2fbf05f678431..cc8aef1779b46 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -4,7 +4,7 @@ import pandas.util._test_decorators as td from pandas import Series -import pandas.util.testing as tm +import pandas._testing as tm @td.skip_if_no("numba", "0.46.0") diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py index 6f6d4c09526ff..717273cff64ea 100644 --- a/pandas/tests/window/test_pairwise.py +++ b/pandas/tests/window/test_pairwise.py @@ -3,8 +3,8 @@ import pytest from pandas import 
DataFrame, Series +import pandas._testing as tm from pandas.core.algorithms import safe_sort -import pandas.util.testing as tm class TestPairwise: diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 227055eb222f8..04fab93b71c4a 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -8,9 +8,9 @@ import pandas as pd from pandas import DataFrame, Index, Series +import pandas._testing as tm from pandas.core.window import Rolling from pandas.tests.window.common import Base -import pandas.util.testing as tm class TestRolling(Base): diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py index c0d47fc2ca624..5f5e10b5dd497 100644 --- a/pandas/tests/window/test_timeseries_window.py +++ b/pandas/tests/window/test_timeseries_window.py @@ -10,7 +10,7 @@ date_range, to_datetime, ) -import pandas.util.testing as tm +import pandas._testing as tm import pandas.tseries.offsets as offsets diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py index d906c0371d207..231a23e247650 100644 --- a/pandas/util/__init__.py +++ b/pandas/util/__init__.py @@ -1,3 +1,4 @@ from pandas.util._decorators import Appender, Substitution, cache_readonly # noqa from pandas.core.util.hashing import hash_array, hash_pandas_object # noqa +from pandas.util.testing import testing # noqa: F401 diff --git a/pandas/util/testing/__init__.py b/pandas/util/testing/__init__.py new file mode 100644 index 0000000000000..02cbd19a9a888 --- /dev/null +++ b/pandas/util/testing/__init__.py @@ -0,0 +1,144 @@ +from pandas.util._depr_module import _DeprecatedModule + +_removals = [ + "Categorical", + "CategoricalIndex", + "Counter", + "DataFrame", + "DatetimeArray", + "DatetimeIndex", + "ExtensionArray", + "FrameOrSeries", + "Index", + "IntervalArray", + "IntervalIndex", + "K", + "List", + "MultiIndex", + "N", + "Optional", + "PeriodArray", + "RANDS_CHARS", + "RANDU_CHARS", + "RNGContext", 
+ "RangeIndex", + "Series", + "SubclassedCategorical", + "SubclassedDataFrame", + "SubclassedSeries", + "TimedeltaArray", + "Union", + "all_index_generator", + "all_timeseries_index_generator", + "array_equivalent", + "assert_attr_equal", + "assert_class_equal", + "assert_contains_all", + "assert_copy", + "assert_dict_equal", + "assert_is_sorted", + "assert_is_valid_plot_return_object", + "assert_produces_warning", + "bdate_range", + "box_expected", + "bz2", + "can_connect", + "can_set_locale", + "cast", + "close", + "contextmanager", + "convert_rows_list_to_csv_str", + "datetime", + "decompress_file", + "ensure_clean", + "ensure_clean_dir", + "ensure_safe_environment_variables", + "equalContents", + "getCols", + "getMixedTypeDict", + "getPeriodData", + "getSeriesData", + "getTimeSeriesData", + "get_locales", + "gzip", + "index_subclass_makers_generator", + "is_bool", + "is_categorical_dtype", + "is_datetime64_dtype", + "is_datetime64tz_dtype", + "is_extension_array_dtype", + "is_interval_dtype", + "is_list_like", + "is_number", + "is_period_dtype", + "is_sequence", + "is_timedelta64_dtype", + "isiterable", + "lzma", + "makeBoolIndex", + "makeCategoricalIndex", + "makeCustomDataframe", + "makeCustomIndex", + "makeDataFrame", + "makeDateIndex", + "makeFloatIndex", + "makeFloatSeries", + "makeIntIndex", + "makeIntervalIndex", + "makeMissingCustomDataframe", + "makeMissingDataframe", + "makeMixedDataFrame", + "makeMultiIndex", + "makeObjectSeries", + "makePeriodFrame", + "makePeriodIndex", + "makePeriodSeries", + "makeRangeIndex", + "makeStringIndex", + "makeStringSeries", + "makeTimeDataFrame", + "makeTimeSeries", + "makeTimedeltaIndex", + "makeUIntIndex", + "makeUnicodeIndex", + "needs_i8_conversion", + "network", + "np", + "optional_args", + "os", + "pd", + "period_array", + "pprint_thing", + "raise_assert_detail", + "rand", + "randbool", + "randn", + "rands", + "rands_array", + "randu", + "randu_array", + "reset_display_options", + "reset_testing_mode", + 
"rmtree", + "round_trip_localpath", + "round_trip_pathlib", + "round_trip_pickle", + "set_locale", + "set_testing_mode", + "set_timezone", + "string", + "take_1d", + "tempfile", + "test_parallel", + "to_array", + "urlopen", + "use_numexpr", + "warnings", + "with_connectivity_check", + "with_csv_dialect", + "wraps", + "write_to_compressed", + "zipfile", +] + +testing = _DeprecatedModule("pandas._testing", "pandas.testing", _removals) diff --git a/scripts/list_future_warnings.sh b/scripts/list_future_warnings.sh index 0c4046bbb5f49..121f4f5a92abb 100755 --- a/scripts/list_future_warnings.sh +++ b/scripts/list_future_warnings.sh @@ -25,7 +25,7 @@ EXCLUDE="^pandas/tests/|" # tests validate that FutureWarnings are raised EXCLUDE+="^pandas/util/_decorators.py$|" # generic deprecate function that raises warning EXCLUDE+="^pandas/util/_depr_module.py$|" # generic deprecate module that raises warnings -EXCLUDE+="^pandas/util/testing.py$|" # contains function to evaluate if warning is raised +EXCLUDE+="^pandas._testing.py$|" # contains function to evaluate if warning is raised EXCLUDE+="^pandas/io/parsers.py$" # implements generic deprecation system in io reading BASE_DIR="$(dirname $0)/.." 
diff --git a/setup.cfg b/setup.cfg index 96af78c77feb8..1484198929973 100644 --- a/setup.cfg +++ b/setup.cfg @@ -109,7 +109,7 @@ known_dtypes = pandas.core.dtypes known_post_core = pandas.tseries,pandas.io,pandas.plotting sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER known_first_party = pandas -known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,yaml +known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,yaml,odf multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 @@ -340,5 +340,5 @@ check_untyped_defs=False [mypy-pandas.tseries.offsets] check_untyped_defs=False -[mypy-pandas.util.testing] +[mypy-pandas._testing] check_untyped_defs=False
Closes https://github.com/pandas-dev/pandas/issues/16232. I tried to keep the commits somewhat clean. https://github.com/pandas-dev/pandas/commit/e96624b8a3f903798f7977fb92da09fc7417ec98 and https://github.com/pandas-dev/pandas/commit/d52d35f9d6a5e721ae0e6aa1579e44dac820ed7a can probably be ignored. That's just moving `pandas.util.testing` to `pandas.util._testing` and updating all the relevant imports. Main question: do we want to make the `tm.make*` methods public? I'd say we probably should, but wanted to confirm.
https://api.github.com/repos/pandas-dev/pandas/pulls/30620
2020-01-02T17:27:01Z
2020-01-03T19:34:02Z
2020-01-03T19:34:02Z
2020-01-07T16:29:07Z
TYP: enable strict_equality to prohibit comparisons of non-overlappin…
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 7301c0ab434a0..63a2298ee02d5 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -60,6 +60,8 @@ class BooleanDtype(ExtensionDtype): BooleanDtype """ + name = "boolean" + @property def na_value(self) -> "Scalar": """ @@ -79,19 +81,6 @@ def type(self) -> Type: def kind(self) -> str: return "b" - @property - def name(self) -> str: - """ - The alias for BooleanDtype is ``'boolean'``. - """ - return "boolean" - - @classmethod - def construct_from_string(cls, string: str) -> ExtensionDtype: - if string == "boolean": - return cls() - return super().construct_from_string(string) - @classmethod def construct_array_type(cls) -> "Type[BooleanArray]": return BooleanArray diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 1eeb9ddc8e064..7d17ef825e6d9 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -320,7 +320,7 @@ def _check_compatible_with(self, other): def dtype(self): return self._dtype - # read-only property overwriting read/write + # error: Read-only property cannot override read-write property [misc] @property # type: ignore def freq(self): """ @@ -638,7 +638,7 @@ def _sub_period(self, other): return new_data def _addsub_int_array( - self, other: np.ndarray, op: Callable[[Any], Any], + self, other: np.ndarray, op: Callable[[Any, Any], Any], ) -> "PeriodArray": """ Add or subtract array of integers; equivalent to applying diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index de254f662bb32..0da877fb1ad45 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -47,6 +47,8 @@ class StringDtype(ExtensionDtype): StringDtype """ + name = "string" + #: StringDtype.na_value uses pandas.NA na_value = libmissing.NA @@ -54,19 +56,6 @@ class StringDtype(ExtensionDtype): def type(self) -> Type: return str - @property - def name(self) -> str: - """ - The alias for 
StringDtype is ``'string'``. - """ - return "string" - - @classmethod - def construct_from_string(cls, string: str) -> ExtensionDtype: - if string == "string": - return cls() - return super().construct_from_string(string) - @classmethod def construct_array_type(cls) -> "Type[StringArray]": return StringArray diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 4a06ea9500770..1b4e7062b38e5 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -236,6 +236,10 @@ def construct_from_string(cls, string: str): """ if not isinstance(string, str): raise TypeError(f"Expects a string, got {type(string).__name__}") + + # error: Non-overlapping equality check (left operand type: "str", right + # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap] + assert isinstance(cls.name, str), (cls, type(cls.name)) if string != cls.name: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") return cls() diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 2afb41e7bdc7e..ec5f6fcb17ff8 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -156,7 +156,7 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar: # GH5394 cell_value = float(cell.attributes.get((OFFICENS, "value"))) - if cell_value == 0.0 and str(cell) != cell_value: # NA handling + if cell_value == 0.0: # NA handling return str(cell) if convert_float: diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 1b18e0fc3f0fa..ea22999470102 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -231,7 +231,7 @@ def __init__( self, series: "Series", buf: Optional[IO[str]] = None, - length: bool = True, + length: Union[bool, str] = True, header: bool = True, index: bool = True, na_rep: str = "NaN", @@ -450,7 +450,7 @@ def _get_adjustment() -> TextAdjustment: class TableFormatter: - show_dimensions: bool + show_dimensions: Union[bool, str] 
is_truncated: bool formatters: formatters_type columns: Index @@ -554,7 +554,7 @@ def __init__( max_rows: Optional[int] = None, min_rows: Optional[int] = None, max_cols: Optional[int] = None, - show_dimensions: bool = False, + show_dimensions: Union[bool, str] = False, decimal: str = ".", table_id: Optional[str] = None, render_links: bool = False, @@ -1276,7 +1276,7 @@ class FloatArrayFormatter(GenericArrayFormatter): """ def __init__(self, *args, **kwargs): - GenericArrayFormatter.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) # float_format is expected to be a string # formatter should be used to pass a function diff --git a/setup.cfg b/setup.cfg index 96af78c77feb8..700c9fdea12b2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -123,6 +123,7 @@ skip = pandas/__init__.py,pandas/core/api.py ignore_missing_imports=True no_implicit_optional=True check_untyped_defs=True +strict_equality=True [mypy-pandas.tests.*] check_untyped_defs=False
…g types By default, mypy allows always-false comparisons like 42 == 'no'. Use this flag to prohibit such comparisons of non-overlapping types, and similar identity and container checks ``` pandas\core\dtypes\base.py:239: error: Non-overlapping equality check (left operand type: "str", right operand type: "Callable[[ExtensionDtype], str]") pandas\core\arrays\period.py:657: error: Non-overlapping container check (element type: "Callable[[Any], Any]", container item type: "Callable[[Any, Any], Any]") pandas\core\arrays\period.py:658: error: Non-overlapping identity check (left operand type: "Callable[[Any], Any]", right operand type: "Callable[[Any, Any], Any]") pandas\io\formats\format.py:307: error: Non-overlapping equality check (left operand type: "bool", right operand type: "Literal['truncate']") pandas\io\formats\format.py:461: error: Non-overlapping equality check (left operand type: "bool", right operand type: "Literal['truncate']") pandas\io\excel\_odfreader.py:159: error: Non-overlapping equality check (left operand type: "str", right operand type: "float") ```
https://api.github.com/repos/pandas-dev/pandas/pulls/30619
2020-01-02T15:36:41Z
2020-01-04T18:28:54Z
2020-01-04T18:28:54Z
2020-01-05T10:02:51Z
DEPR: is_copy arg of take
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index df747cb9654a9..3f94894e1da3c 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -579,6 +579,7 @@ Deprecations - The deprecated internal attributes ``_start``, ``_stop`` and ``_step`` of :class:`RangeIndex` now raise a ``FutureWarning`` instead of a ``DeprecationWarning`` (:issue:`26581`) - The ``pandas.util.testing`` module has been deprecated. Use the public API in ``pandas.testing`` documented at :ref:`api.general.testing` (:issue:`16232`). - ``pandas.SparseArray`` has been deprecated. Use ``pandas.arrays.SparseArray`` (:class:`arrays.SparseArray`) instead. (:issue:`30642`) +- The parameter ``is_copy`` of :meth:`DataFrame.take` has been deprecated and will be removed in a future version. (:issue:`27357`) **Selecting Columns from a Grouped DataFrame** diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3b8e9cf82f08c..bf183523ba358 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3275,7 +3275,7 @@ def _clear_item_cache(self) -> None: # Indexing Methods def take( - self: FrameOrSeries, indices, axis=0, is_copy: bool_t = True, **kwargs + self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs ) -> FrameOrSeries: """ Return the elements in the given *positional* indices along an axis. @@ -3293,6 +3293,8 @@ def take( selecting rows, ``1`` means that we are selecting columns. is_copy : bool, default True Whether to return a copy of the original object or not. + + .. deprecated:: 1.0.0 **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. @@ -3351,6 +3353,16 @@ class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ + if is_copy is not None: + warnings.warn( + "is_copy is deprecated and will be removed in a future version. 
" + "take will always return a copy in the future.", + FutureWarning, + stacklevel=2, + ) + else: + is_copy = True + nv.validate_take(tuple(), kwargs) self._consolidate_inplace() @@ -5014,7 +5026,7 @@ def sample( ) locs = rs.choice(axis_length, size=n, replace=replace, p=weights) - return self.take(locs, axis=axis, is_copy=False) + return self.take(locs, axis=axis) _shared_docs[ "pipe" @@ -7011,7 +7023,8 @@ def asof(self, where, subset=None): # mask the missing missing = locs == -1 - data = self.take(locs, is_copy=False) + d = self.take(locs) + data = d.copy() data.index = where data.loc[missing] = np.nan return data if is_list else data.iloc[-1] diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 7e7261130ff4a..5436101540c55 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -194,7 +194,7 @@ def _set_grouper(self, obj: FrameOrSeries, sort: bool = False): # use stable sort to support first, last, nth indexer = self.indexer = ax.argsort(kind="mergesort") ax = ax.take(indexer) - obj = obj.take(indexer, axis=self.axis, is_copy=False) + obj = obj.take(indexer, axis=self.axis) self.obj = obj self.grouper = ax diff --git a/pandas/tests/frame/methods/test_asof.py b/pandas/tests/frame/methods/test_asof.py index 055f81c9942a6..774369794cb90 100644 --- a/pandas/tests/frame/methods/test_asof.py +++ b/pandas/tests/frame/methods/test_asof.py @@ -30,6 +30,7 @@ def test_basic(self, date_range_frame): ub = df.index[30] dates = list(dates) + result = df.asof(dates) assert result.notna().all(1).all() @@ -65,6 +66,7 @@ def test_missing(self, date_range_frame): # no match found - `where` value before earliest date in index N = 10 df = date_range_frame.iloc[:N].copy() + result = df.asof("1989-12-31") expected = Series( @@ -132,5 +134,6 @@ def test_time_zone_aware_index(self, stamp, expected): Timestamp("2018-01-01 22:35:10.550+00:00"), ], ) + result = df.asof(stamp) tm.assert_series_equal(result, expected) diff --git 
a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index cdb79dc6606ff..10a1e09a09bf8 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -820,6 +820,18 @@ def test_take_invalid_kwargs(self): with pytest.raises(ValueError, match=msg): obj.take(indices, mode="clip") + def test_depr_take_kwarg_is_copy(self): + # GH 27357 + df = DataFrame({"A": [1, 2, 3]}) + msg = ( + "is_copy is deprecated and will be removed in a future version. " + "take will always return a copy in the future." + ) + with tm.assert_produces_warning(FutureWarning) as w: + df.take([0, 1], is_copy=True) + + assert w[0].message.args[0] == msg + def test_equals(self): s1 = pd.Series([1, 2, 3], index=[0, 2, 1]) s2 = s1.copy()
- [x] closes #27357 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30615
2020-01-02T04:28:17Z
2020-01-06T13:21:33Z
2020-01-06T13:21:33Z
2020-01-08T09:54:00Z
TYP: Add TypeVars to NDFrame
diff --git a/pandas/core/base.py b/pandas/core/base.py index ef7e59c9e19d7..d38dbec684f35 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -8,6 +8,7 @@ import numpy as np import pandas._libs.lib as lib +from pandas._typing import T from pandas.compat import PYPY from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -87,6 +88,14 @@ def __sizeof__(self): # object's 'sizeof' return super().__sizeof__() + def _ensure_type(self: T, obj) -> T: + """Ensure that an object has same type as self. + + Used by type checkers. + """ + assert isinstance(obj, type(self)), type(obj) + return obj + class NoNewAttributesMixin: """Mixin which prevents adding new attributes. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 85bbf9b553b0a..d4e36a1894873 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -19,6 +19,7 @@ Sequence, Set, Tuple, + Type, Union, ) import warnings @@ -262,7 +263,7 @@ def _validate_dtype(self, dtype): # Construction @property - def _constructor(self): + def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]: """Used when a manipulation result has the same dimensions as the original. """ @@ -298,7 +299,7 @@ def _constructor_expanddim(self): _AXIS_LEN: int @classmethod - def _setup_axes(cls, axes: List[str], docs: Dict[str, str]): + def _setup_axes(cls, axes: List[str], docs: Dict[str, str]) -> None: """ Provide axes setup for the major PandasObjects. 
@@ -373,7 +374,7 @@ def _construct_axes_from_arguments( return axes, kwargs @classmethod - def _from_axes(cls, data, axes, **kwargs): + def _from_axes(cls: Type[FrameOrSeries], data, axes, **kwargs) -> FrameOrSeries: # for construction from BlockManager if isinstance(data, BlockManager): return cls(data, **kwargs) @@ -486,7 +487,7 @@ def shape(self) -> Tuple[int, ...]: return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property - def axes(self): + def axes(self) -> List[Index]: """ Return index label(s) of the internal NDFrame """ @@ -639,11 +640,11 @@ def set_axis(self, labels, axis=0, inplace=False): obj.set_axis(labels, axis=axis, inplace=True) return obj - def _set_axis(self, axis, labels): + def _set_axis(self, axis, labels) -> None: self._data.set_axis(axis, labels) self._clear_item_cache() - def swapaxes(self, axis1, axis2, copy=True): + def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries: """ Interchange axes and swap values axes appropriately. @@ -668,7 +669,7 @@ def swapaxes(self, axis1, axis2, copy=True): return self._constructor(new_values, *new_axes).__finalize__(self) - def droplevel(self, level, axis=0): + def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: """ Return DataFrame with requested index / column level(s) removed. @@ -728,7 +729,7 @@ def droplevel(self, level, axis=0): result = self.set_axis(new_labels, axis=axis, inplace=False) return result - def pop(self, item): + def pop(self: FrameOrSeries, item) -> FrameOrSeries: """ Return item and drop from frame. Raise KeyError if not found. 
@@ -889,7 +890,7 @@ def squeeze(self, axis=None): ) ] - def swaplevel(self, i=-2, j=-1, axis=0): + def swaplevel(self: FrameOrSeries, i=-2, j=-1, axis=0) -> FrameOrSeries: """ Swap levels i and j in a MultiIndex on a particular axis @@ -1473,10 +1474,10 @@ def bool(self): self.__nonzero__() - def __abs__(self): + def __abs__(self: FrameOrSeries) -> FrameOrSeries: return self.abs() - def __round__(self, decimals=0): + def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries: return self.round(decimals) # ------------------------------------------------------------------------- @@ -2123,7 +2124,7 @@ def to_excel( inf_rep="inf", verbose=True, freeze_panes=None, - ): + ) -> None: df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter @@ -2347,7 +2348,7 @@ def to_hdf( data_columns: Optional[List[str]] = None, errors: str = "strict", encoding: str = "UTF-8", - ): + ) -> None: """ Write the contained data to an HDF5 file using HDFStore. @@ -2691,7 +2692,9 @@ def to_pickle( to_pickle(self, path, compression=compression, protocol=protocol) - def to_clipboard(self, excel: bool_t = True, sep: Optional[str] = None, **kwargs): + def to_clipboard( + self, excel: bool_t = True, sep: Optional[str] = None, **kwargs + ) -> None: r""" Copy object to the system clipboard. @@ -3259,7 +3262,9 @@ def _clear_item_cache(self) -> None: # ---------------------------------------------------------------------- # Indexing Methods - def take(self, indices, axis=0, is_copy: bool_t = True, **kwargs): + def take( + self: FrameOrSeries, indices, axis=0, is_copy: bool_t = True, **kwargs + ) -> FrameOrSeries: """ Return the elements in the given *positional* indices along an axis. 
@@ -3542,7 +3547,7 @@ def _iget_item_cache(self, item): def _box_item_values(self, key, values): raise AbstractMethodError(self) - def _slice(self, slobj: slice, axis=0, kind=None): + def _slice(self: FrameOrSeries, slobj: slice, axis=0, kind=None) -> FrameOrSeries: """ Construct a slice of this container. @@ -3668,7 +3673,7 @@ def _check_setitem_copy(self, stacklevel=4, t="setting", force=False): elif value == "warn": warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel) - def __delitem__(self, key): + def __delitem__(self, key) -> None: """ Delete item """ @@ -3730,13 +3735,13 @@ def _is_view(self): return self._data.is_view def reindex_like( - self, + self: FrameOrSeries, other, method: Optional[str] = None, copy: bool_t = True, limit=None, tolerance=None, - ): + ) -> FrameOrSeries: """ Return an object with matching indices as other object. @@ -3878,7 +3883,9 @@ def drop( else: return obj - def _drop_axis(self, labels, axis, level=None, errors: str = "raise"): + def _drop_axis( + self: FrameOrSeries, labels, axis, level=None, errors: str = "raise" + ) -> FrameOrSeries: """ Drop labels from specified axis. Used in the ``drop`` method internally. @@ -3948,7 +3955,7 @@ def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: self._data = getattr(result, "_data", result) self._maybe_update_cacher(verify_is_copy=verify_is_copy) - def add_prefix(self, prefix: str): + def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries: """ Prefix labels with string `prefix`. @@ -4007,7 +4014,7 @@ def add_prefix(self, prefix: str): mapper = {self._info_axis_name: f} return self.rename(**mapper) - def add_suffix(self, suffix: str): + def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries: """ Suffix labels with string `suffix`. 
@@ -4227,7 +4234,7 @@ def sort_index( new_axis = labels.take(sort_index) return self.reindex(**{axis_name: new_axis}) - def reindex(self, *args, **kwargs): + def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: """ Conform %(klass)s to new index with optional filling logic. @@ -4475,7 +4482,9 @@ def reindex(self, *args, **kwargs): axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self) - def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): + def _reindex_axes( + self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy + ) -> FrameOrSeries: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: @@ -4511,12 +4520,12 @@ def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( - self, + self: FrameOrSeries, reindexers, fill_value=None, copy: bool_t = False, allow_dups: bool_t = False, - ): + ) -> FrameOrSeries: """allow_dups indicates an internal call here """ # reindex doing multiple operations on different axes if indicated @@ -4548,12 +4557,12 @@ def _reindex_with_indexers( return self._constructor(new_data).__finalize__(self) def filter( - self, + self: FrameOrSeries, items=None, like: Optional[str] = None, regex: Optional[str] = None, axis=None, - ): + ) -> FrameOrSeries: """ Subset the dataframe rows or columns according to the specified index labels. @@ -4793,14 +4802,14 @@ def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: return self.iloc[-n:] def sample( - self, + self: FrameOrSeries, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None, - ): + ) -> FrameOrSeries: """ Return a random sample of items from an axis of object. @@ -5339,7 +5348,7 @@ def _get_bool_data(self): # Internal Interface Methods @property - def values(self): + def values(self) -> np.ndarray: """ Return a Numpy representation of the DataFrame. 
@@ -5416,16 +5425,16 @@ def values(self): return self._data.as_array(transpose=self._AXIS_REVERSED) @property - def _values(self): + def _values(self) -> np.ndarray: """internal implementation""" return self.values @property - def _get_values(self): + def _get_values(self) -> np.ndarray: # compat return self.values - def _internal_get_values(self): + def _internal_get_values(self) -> np.ndarray: """ Return an ndarray after converting sparse values to dense. @@ -5489,7 +5498,9 @@ def _to_dict_of_blocks(self, copy: bool_t = True): for k, v, in self._data.to_dict(copy=copy).items() } - def astype(self, dtype, copy: bool_t = True, errors: str = "raise"): + def astype( + self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise" + ) -> FrameOrSeries: """ Cast a pandas object to a specified dtype ``dtype``. @@ -5797,7 +5808,7 @@ def _convert( ) ).__finalize__(self) - def infer_objects(self): + def infer_objects(self: FrameOrSeries) -> FrameOrSeries: """ Attempt to infer better dtypes for object columns. 
@@ -7054,11 +7065,11 @@ def asof(self, where, subset=None): """ @Appender(_shared_docs["isna"] % _shared_doc_kwargs) - def isna(self): + def isna(self: FrameOrSeries) -> FrameOrSeries: return isna(self).__finalize__(self) @Appender(_shared_docs["isna"] % _shared_doc_kwargs) - def isnull(self): + def isnull(self: FrameOrSeries) -> FrameOrSeries: return isna(self).__finalize__(self) _shared_docs[ @@ -7124,11 +7135,11 @@ def isnull(self): """ @Appender(_shared_docs["notna"] % _shared_doc_kwargs) - def notna(self): + def notna(self: FrameOrSeries) -> FrameOrSeries: return notna(self).__finalize__(self) @Appender(_shared_docs["notna"] % _shared_doc_kwargs) - def notnull(self): + def notnull(self: FrameOrSeries) -> FrameOrSeries: return notna(self).__finalize__(self) def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): @@ -7180,14 +7191,14 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace): return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( - self, + self: FrameOrSeries, lower=None, upper=None, axis=None, inplace: bool_t = False, *args, **kwargs, - ): + ) -> FrameOrSeries: """ Trim values at input threshold(s). @@ -7364,13 +7375,13 @@ def clip( """ def asfreq( - self, + self: FrameOrSeries, freq, method=None, how: Optional[str] = None, normalize: bool_t = False, fill_value=None, - ): + ) -> FrameOrSeries: """ Convert TimeSeries to specified frequency. @@ -7473,7 +7484,9 @@ def asfreq( fill_value=fill_value, ) - def at_time(self, time, asof: bool_t = False, axis=None): + def at_time( + self: FrameOrSeries, time, asof: bool_t = False, axis=None + ) -> FrameOrSeries: """ Select values at particular time of day (e.g. 9:30AM). 
@@ -7530,13 +7543,13 @@ def at_time(self, time, asof: bool_t = False, axis=None): return self.take(indexer, axis=axis) def between_time( - self, + self: FrameOrSeries, start_time, end_time, include_start: bool_t = True, include_end: bool_t = True, axis=None, - ): + ) -> FrameOrSeries: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). @@ -7905,7 +7918,7 @@ def resample( level=level, ) - def first(self, offset): + def first(self: FrameOrSeries, offset) -> FrameOrSeries: """ Method to subset initial periods of time series data based on a date offset. @@ -7967,7 +7980,7 @@ def first(self, offset): return self.loc[:end] - def last(self, offset): + def last(self: FrameOrSeries, offset) -> FrameOrSeries: """ Method to subset final periods of time series data based on a date offset. @@ -8326,8 +8339,12 @@ def _align_frame( ) if method is not None: - left = left.fillna(axis=fill_axis, method=method, limit=limit) - right = right.fillna(axis=fill_axis, method=method, limit=limit) + left = self._ensure_type( + left.fillna(method=method, axis=fill_axis, limit=limit) + ) + right = self._ensure_type( + right.fillna(method=method, axis=fill_axis, limit=limit) + ) # if DatetimeIndex have different tz, convert to UTC if is_datetime64tz_dtype(left.index): @@ -8820,7 +8837,9 @@ def mask( """ @Appender(_shared_docs["shift"] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0, fill_value=None): + def shift( + self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None + ) -> FrameOrSeries: if periods == 0: return self.copy() @@ -8871,7 +8890,9 @@ def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries: return new_obj.__finalize__(self) - def tshift(self, periods: int = 1, freq=None, axis=0): + def tshift( + self: FrameOrSeries, periods: int = 1, freq=None, axis=0 + ) -> FrameOrSeries: """ Shift the time index, using the index's frequency if available. 
@@ -9314,7 +9335,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent): # ---------------------------------------------------------------------- # Numeric Methods - def abs(self): + def abs(self: FrameOrSeries) -> FrameOrSeries: """ Return a Series/DataFrame with absolute numeric value of each element. @@ -9383,7 +9404,9 @@ def abs(self): """ return np.abs(self) - def describe(self, percentiles=None, include=None, exclude=None): + def describe( + self: FrameOrSeries, percentiles=None, include=None, exclude=None + ) -> FrameOrSeries: """ Generate descriptive statistics. @@ -9719,7 +9742,7 @@ def describe_1d(data): ldesc = [describe_1d(s) for _, s in data.items()] # set a convenient order for rows - names = [] + names: List[Optional[Hashable]] = [] ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: @@ -9848,13 +9871,22 @@ def describe_1d(data): """ @Appender(_shared_docs["pct_change"] % _shared_doc_kwargs) - def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs): + def pct_change( + self: FrameOrSeries, + periods=1, + fill_method="pad", + limit=None, + freq=None, + **kwargs, + ) -> FrameOrSeries: # TODO: Not sure if above is correct - need someone to confirm. 
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: - data = self.fillna(method=fill_method, limit=limit, axis=axis) + data = self._ensure_type( + self.fillna(method=fill_method, axis=axis, limit=limit) + ) rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1 rs = rs.loc[~rs.index.duplicated()] diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 2eb2990bd58c4..c544c132d6921 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -148,9 +148,7 @@ def pivot_table( table = table.sort_index(axis=1) if fill_value is not None: - filled = table.fillna(value=fill_value, downcast="infer") - assert filled is not None # needed for mypy - table = filled + table = table._ensure_type(table.fillna(fill_value, downcast="infer")) if margins: if dropna:
Adding TypeVars to NDFrame methods that don't return optional values. This ensures that e.g. ``(DataFrame|Series).astype`` and ``(DataFrame|Series).copy`` have a known return type, which is nice. Also adds ``PandasObject._ensure_type``, which is a method used to solve the problem with optional return values, but where we actually know the return type. This is a helper method to help with the tediousness of optional return values experienced in #30565.
https://api.github.com/repos/pandas-dev/pandas/pulls/30613
2020-01-02T03:08:10Z
2020-01-03T01:05:17Z
2020-01-03T01:05:17Z
2020-03-02T22:35:16Z
CLN: replacing str.format with f-strings in several files. #29547
diff --git a/pandas/tests/io/formats/test_css.py b/pandas/tests/io/formats/test_css.py index a6ad5d5edbf5f..0d18b57c6d6e5 100644 --- a/pandas/tests/io/formats/test_css.py +++ b/pandas/tests/io/formats/test_css.py @@ -101,29 +101,25 @@ def test_css_side_shorthands(shorthand, expansions): top, right, bottom, left = expansions assert_resolves( - "{shorthand}: 1pt".format(shorthand=shorthand), - {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}, + f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}, ) assert_resolves( - "{shorthand}: 1pt 4pt".format(shorthand=shorthand), - {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}, + f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}, ) assert_resolves( - "{shorthand}: 1pt 4pt 2pt".format(shorthand=shorthand), + f"{shorthand}: 1pt 4pt 2pt", {top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"}, ) assert_resolves( - "{shorthand}: 1pt 4pt 2pt 0pt".format(shorthand=shorthand), + f"{shorthand}: 1pt 4pt 2pt 0pt", {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"}, ) with tm.assert_produces_warning(CSSWarning): - assert_resolves( - "{shorthand}: 1pt 1pt 1pt 1pt 1pt".format(shorthand=shorthand), {} - ) + assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {}) @pytest.mark.parametrize( @@ -174,10 +170,10 @@ def test_css_none_absent(style, equiv): "size,resolved", [ ("xx-small", "6pt"), - ("x-small", "{pt:f}pt".format(pt=7.5)), - ("small", "{pt:f}pt".format(pt=9.6)), + ("x-small", f"{7.5:f}pt"), + ("small", f"{9.6:f}pt"), ("medium", "12pt"), - ("large", "{pt:f}pt".format(pt=13.5)), + ("large", f"{13.5:f}pt"), ("x-large", "18pt"), ("xx-large", "24pt"), ("8px", "6pt"), @@ -196,9 +192,7 @@ def test_css_absolute_font_size(size, relative_to, resolved): else: inherited = {"font-size": relative_to} assert_resolves( - "font-size: {size}".format(size=size), - {"font-size": resolved}, - inherited=inherited, + f"font-size: {size}", {"font-size": resolved}, inherited=inherited, ) 
@@ -224,7 +218,7 @@ def test_css_absolute_font_size(size, relative_to, resolved): ("inherit", "16pt", "16pt"), ("smaller", None, "10pt"), ("smaller", "18pt", "15pt"), - ("larger", None, "{pt:f}pt".format(pt=14.4)), + ("larger", None, f"{14.4:f}pt"), ("larger", "15pt", "18pt"), ], ) @@ -234,7 +228,5 @@ def test_css_relative_font_size(size, relative_to, resolved): else: inherited = {"font-size": relative_to} assert_resolves( - "font-size: {size}".format(size=size), - {"font-size": resolved}, - inherited=inherited, + f"font-size: {size}", {"font-size": resolved}, inherited=inherited, ) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index f51dd2918efff..3b9deeca54af9 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -421,12 +421,10 @@ def test_repr_truncation_column_size(self): def test_repr_max_columns_max_rows(self): term_width, term_height = get_terminal_size() if term_width < 10 or term_height < 10: - pytest.skip( - "terminal size too small, {0} x {1}".format(term_width, term_height) - ) + pytest.skip(f"terminal size too small, {term_width} x {term_height}") def mkframe(n): - index = ["{i:05d}".format(i=i) for i in range(n)] + index = [f"{i:05d}" for i in range(n)] return DataFrame(0, index, index) df6 = mkframe(6) @@ -667,9 +665,9 @@ def test_to_string_with_formatters(self): ) formatters = [ - ("int", lambda x: "0x{x:x}".format(x=x)), - ("float", lambda x: "[{x: 4.1f}]".format(x=x)), - ("object", lambda x: "-{x!s}-".format(x=x)), + ("int", lambda x: f"0x{x:x}"), + ("float", lambda x: f"[{x: 4.1f}]"), + ("object", lambda x: f"-{x!s}-"), ] result = df.to_string(formatters=dict(formatters)) result2 = df.to_string(formatters=list(zip(*formatters))[1]) @@ -711,7 +709,7 @@ def format_func(x): def test_to_string_with_formatters_unicode(self): df = DataFrame({"c/\u03c3": [1, 2, 3]}) - result = df.to_string(formatters={"c/\u03c3": lambda x: "{x}".format(x=x)}) + result = 
df.to_string(formatters={"c/\u03c3": str}) assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3" def test_east_asian_unicode_false(self): @@ -1240,7 +1238,7 @@ def test_wide_repr(self): set_option("display.expand_frame_repr", False) rep_str = repr(df) - assert "10 rows x {c} columns".format(c=max_cols - 1) in rep_str + assert f"10 rows x {max_cols - 1} columns" in rep_str set_option("display.expand_frame_repr", True) wide_repr = repr(df) assert rep_str != wide_repr @@ -1351,7 +1349,7 @@ def test_long_series(self): n = 1000 s = Series( np.random.randint(-50, 50, n), - index=["s{x:04d}".format(x=x) for x in range(n)], + index=[f"s{x:04d}" for x in range(n)], dtype="int64", ) @@ -1477,9 +1475,7 @@ def test_to_string(self): expected = ["A"] assert header == expected - biggie.to_string( - columns=["B", "A"], formatters={"A": lambda x: "{x:.1f}".format(x=x)} - ) + biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"}) biggie.to_string(columns=["B", "A"], float_format=str) biggie.to_string(columns=["B", "A"], col_space=12, float_format=str) @@ -1610,7 +1606,7 @@ def test_to_string_small_float_values(self): result = df.to_string() # sadness per above - if "{x:.4g}".format(x=1.7e8) == "1.7e+008": + if _three_digit_exp(): expected = ( " a\n" "0 1.500000e+000\n" @@ -1922,7 +1918,7 @@ def test_repr_html_long(self): long_repr = df._repr_html_() assert ".." in long_repr assert str(41 + max_rows // 2) not in long_repr - assert "{h} rows ".format(h=h) in long_repr + assert f"{h} rows " in long_repr assert "2 columns" in long_repr def test_repr_html_float(self): @@ -1939,7 +1935,7 @@ def test_repr_html_float(self): ).set_index("idx") reg_repr = df._repr_html_() assert ".." not in reg_repr - assert "<td>{val}</td>".format(val=str(40 + h)) in reg_repr + assert f"<td>{40 + h}</td>" in reg_repr h = max_rows + 1 df = DataFrame( @@ -1951,8 +1947,8 @@ def test_repr_html_float(self): ).set_index("idx") long_repr = df._repr_html_() assert ".." 
in long_repr - assert "<td>{val}</td>".format(val="31") not in long_repr - assert "{h} rows ".format(h=h) in long_repr + assert "<td>31</td>" not in long_repr + assert f"{h} rows " in long_repr assert "2 columns" in long_repr def test_repr_html_long_multiindex(self): @@ -2181,9 +2177,7 @@ def test_to_string(self): cp.name = "foo" result = cp.to_string(length=True, name=True, dtype=True) last_line = result.split("\n")[-1].strip() - assert last_line == ( - "Freq: B, Name: foo, Length: {cp}, dtype: float64".format(cp=len(cp)) - ) + assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64") def test_freq_name_separation(self): s = Series( @@ -2782,7 +2776,7 @@ def test_to_string_na_rep(self): def test_to_string_float_format(self): s = pd.Series(range(10), dtype="float64") - res = s.to_string(float_format=lambda x: "{0:2.1f}".format(x), max_rows=2) + res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2) exp = "0 0.0\n ..\n9 9.0" assert res == exp @@ -2807,7 +2801,7 @@ def test_to_string_multindex_header(self): def _three_digit_exp(): - return "{x:.4g}".format(x=1.7e8) == "1.7e+008" + return f"{1.7e8:.4g}" == "1.7e+008" class TestFloatArrayFormatter: diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 5a3afb5025e51..f3fe3debdd217 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -24,7 +24,7 @@ def setup_method(self, method): self.g = lambda x: x def h(x, foo="bar"): - return pd.Series("color: {foo}".format(foo=foo), index=x.index, name=x.name) + return pd.Series(f"color: {foo}", index=x.index, name=x.name) self.h = h self.styler = Styler(self.df) @@ -278,7 +278,7 @@ def test_numeric_columns(self): def test_apply_axis(self): df = pd.DataFrame({"A": [0, 0], "B": [1, 1]}) - f = lambda x: ["val: {max}".format(max=x.max()) for v in x] + f = lambda x: [f"val: {x.max()}" for v in x] result = df.style.apply(f, axis=1) assert len(result._todo) == 1 assert 
len(result.ctx) == 0 @@ -362,7 +362,7 @@ def color_negative_red(val): strings, black otherwise. """ color = "red" if val < 0 else "black" - return "color: {color}".format(color=color) + return f"color: {color}" dic = { ("a", "d"): [-1.12, 2.11], @@ -1215,13 +1215,9 @@ def test_highlight_max(self): def test_export(self): f = lambda x: "color: red" if x > 0 else "color: blue" - g = ( - lambda x, y, z: "color: {z}".format(z=z) - if x > 0 - else "color: {z}".format(z=z) - ) + g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}" style1 = self.styler - style1.applymap(f).applymap(g, y="a", z="b").highlight_max() + style1.applymap(f).applymap(g, z="b").highlight_max() result = style1.export() style2 = self.df.style style2.use(result) @@ -1645,9 +1641,7 @@ def test_hide_columns_mult_levels(self): def test_pipe(self): def set_caption_from_template(styler, a, b): - return styler.set_caption( - "Dataframe with a = {a} and b = {b}".format(a=a, b=b) - ) + return styler.set_caption(f"Dataframe with a = {a} and b = {b}") styler = self.df.style.pipe(set_caption_from_template, "A", b="B") assert "Dataframe with a = A and b = B" in styler.render() diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 24233a0ec84b1..cfe61e6ed4d52 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -486,10 +486,7 @@ def test_to_csv_compression(self, compression_only, read_infer, to_infer): compression = compression_only if compression == "zip": - pytest.skip( - "{compression} is not supported " - "for to_csv".format(compression=compression) - ) + pytest.skip(f"{compression} is not supported for to_csv") # We'll complete file extension subsequently. filename = "test." 
diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 4d8edec7c7f14..a6c673e8c51d6 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -270,13 +270,13 @@ def test_css_to_excel_inherited(css, inherited, expected): def test_css_to_excel_good_colors(input_color, output_color): # see gh-18392 css = ( - "border-top-color: {color}; " - "border-right-color: {color}; " - "border-bottom-color: {color}; " - "border-left-color: {color}; " - "background-color: {color}; " - "color: {color}" - ).format(color=input_color) + f"border-top-color: {input_color}; " + f"border-right-color: {input_color}; " + f"border-bottom-color: {input_color}; " + f"border-left-color: {input_color}; " + f"background-color: {input_color}; " + f"color: {input_color}" + ) expected = dict() @@ -297,13 +297,13 @@ def test_css_to_excel_good_colors(input_color, output_color): def test_css_to_excel_bad_colors(input_color): # see gh-18392 css = ( - "border-top-color: {color}; " - "border-right-color: {color}; " - "border-bottom-color: {color}; " - "border-left-color: {color}; " - "background-color: {color}; " - "color: {color}" - ).format(color=input_color) + f"border-top-color: {input_color}; " + f"border-right-color: {input_color}; " + f"border-bottom-color: {input_color}; " + f"border-left-color: {input_color}; " + f"background-color: {input_color}; " + f"color: {input_color}" + ) expected = dict()
- [ ] xref #29547 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30612
2020-01-02T02:00:41Z
2020-01-03T00:36:40Z
2020-01-03T00:36:40Z
2020-01-03T00:36:54Z
Improve ASV Environment Creation Performance
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index c04bbf53a86a6..902b472304909 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -122,5 +122,8 @@ ".*": "0409521665" }, "regression_thresholds": { - } + }, + "build_command": + ["python setup.py build -j4", + "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}"], }
...by leveraging a lot of the recent work to enable parallel CI Overriding default values documented here: https://asv.readthedocs.io/en/stable/asv.conf.json.html#build-command-install-command-uninstall-command I've picked 4 as the value assuming most people running benchmarks will have 2-4 cores. YMMV
https://api.github.com/repos/pandas-dev/pandas/pulls/30611
2020-01-02T00:46:35Z
2020-01-02T15:41:12Z
2020-01-02T15:41:12Z
2020-01-16T00:33:27Z
CLN: remove warnings clearing
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 57368a799138a..ec25f022f5a9e 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -26,7 +26,6 @@ Timestamp, date_range, ) -import pandas.core.arrays.datetimelike as dtl from pandas.core.indexes.datetimes import _to_M8 from pandas.core.ops import roperator from pandas.tests.arithmetic.common import ( @@ -1332,7 +1331,7 @@ def test_dt64arr_add_mixed_offset_array(self, box_with_array): s = tm.box_expected(s, box_with_array) warn = None if box_with_array is pd.DataFrame else PerformanceWarning - with tm.assert_produces_warning(warn, clear=[dtl]): + with tm.assert_produces_warning(warn): other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]) other = tm.box_expected(other, box_with_array) result = s + other @@ -1361,7 +1360,7 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array): other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) warn = None if box_with_array is pd.DataFrame else PerformanceWarning - with tm.assert_produces_warning(warn, clear=[dtl]): + with tm.assert_produces_warning(warn): res = dtarr + other expected = DatetimeIndex( [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer" @@ -1369,11 +1368,11 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array): expected = tm.box_expected(expected, box_with_array) tm.assert_equal(res, expected) - with tm.assert_produces_warning(warn, clear=[dtl]): + with tm.assert_produces_warning(warn): res2 = other + dtarr tm.assert_equal(res2, expected) - with tm.assert_produces_warning(warn, clear=[dtl]): + with tm.assert_produces_warning(warn): res = dtarr - other expected = DatetimeIndex( [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer" @@ -2298,7 +2297,7 @@ def test_dti_addsub_offset_arraylike( xbox = get_upcast_box(box, other) - with 
tm.assert_produces_warning(PerformanceWarning, clear=[dtl]): + with tm.assert_produces_warning(PerformanceWarning): res = op(dti, other) expected = DatetimeIndex(
the tm.assert_produces_warning `clear` kwarg is no longer used, we could consider removing it
https://api.github.com/repos/pandas-dev/pandas/pulls/30608
2020-01-02T00:11:05Z
2020-01-02T02:36:50Z
2020-01-02T02:36:50Z
2020-01-02T02:40:33Z
REF: Delegate more methods for DTI/TDI/PI
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 11f4131df62a6..87a76b8681da4 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -445,7 +445,7 @@ def _formatter(self, boxed=False): return _get_format_timedelta64(self, box=True) - def _format_native_types(self, na_rep="NaT", date_format=None): + def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): from pandas.io.formats.format import _get_format_timedelta64 formatter = _get_format_timedelta64(self._data, na_rep) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f8e8a7037b9c4..f6f46d7e66c69 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1,5 +1,6 @@ -from datetime import datetime, time, timedelta +from datetime import datetime, time, timedelta, tzinfo import operator +from typing import Optional import warnings import numpy as np @@ -66,8 +67,13 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin): # We also have a few "extra" attrs, which may or may not be raw, # which we we dont' want to expose in the .dt accessor. 
_extra_methods = ["to_period", "to_perioddelta", "to_julian_date", "strftime"] - _extra_raw_methods = ["to_pydatetime", "_local_timestamps", "_has_same_tz"] - _extra_raw_properties = ["_box_func", "tz", "tzinfo"] + _extra_raw_methods = [ + "to_pydatetime", + "_local_timestamps", + "_has_same_tz", + "_format_native_types", + ] + _extra_raw_properties = ["_box_func", "tz", "tzinfo", "dtype"] _delegated_properties = DatetimeArray._datetimelike_ops + _extra_raw_properties _delegated_methods = ( DatetimeArray._datetimelike_methods + _extra_methods + _extra_raw_methods @@ -88,7 +94,7 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin): DatetimeArray, DatetimeDelegateMixin._delegated_methods, typ="method", - overwrite=False, + overwrite=True, ) class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin): """ @@ -197,8 +203,6 @@ class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin): _engine_type = libindex.DatetimeEngine _supports_partial_string_indexing = True - _tz = None - _freq = None _comparables = ["name", "freqstr", "tz"] _attributes = ["name", "tz", "freq"] @@ -214,6 +218,8 @@ class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin): _datetimelike_ops = DatetimeArray._datetimelike_ops _datetimelike_methods = DatetimeArray._datetimelike_methods + tz: Optional[tzinfo] + # -------------------------------------------------------------------- # Constructors @@ -310,25 +316,6 @@ def __array__(self, dtype=None): dtype = "M8[ns]" return np.asarray(self._data, dtype=dtype) - @property - def dtype(self): - return self._data.dtype - - @property - def tz(self): - # GH 18595 - return self._data.tz - - @tz.setter - def tz(self, value): - # GH 3746: Prevent localizing or converting the index by setting tz - raise AttributeError( - "Cannot directly set timezone. 
Use tz_localize() " - "or tz_convert() as appropriate" - ) - - tzinfo = tz - @cache_readonly def _is_dates_only(self) -> bool: """ @@ -401,15 +388,6 @@ def _mpl_repr(self): # how to represent ourselves to matplotlib return libts.ints_to_pydatetime(self.asi8, self.tz) - def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): - from pandas.io.formats.format import _get_format_datetime64_from_values - - fmt = _get_format_datetime64_from_values(self, date_format) - - return libts.format_array_from_datetime( - self.asi8, tz=self.tz, format=fmt, na_rep=na_rep - ) - @property def _formatter_func(self): from pandas.io.formats.format import _get_format_datetime64 @@ -999,10 +977,6 @@ def __getitem__(self, key): return result return type(self)(result, name=self.name) - @property - def _box_func(self): - return lambda x: Timestamp(x, tz=self.tz) - # -------------------------------------------------------------------- @Substitution(klass="DatetimeIndex") diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0cd4b4d4bca8d..022e3ba674a27 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -66,12 +66,11 @@ class PeriodDelegateMixin(DatetimelikeDelegateMixin): """ _delegate_class = PeriodArray - _delegated_properties = PeriodArray._datetimelike_ops - _delegated_methods = set(PeriodArray._datetimelike_methods) | { - "_addsub_int_array", - "strftime", - } - _raw_properties = {"is_leap_year"} + _raw_methods = {"_format_native_types"} + _raw_properties = {"is_leap_year", "freq"} + + _delegated_properties = PeriodArray._datetimelike_ops + list(_raw_properties) + _delegated_methods = set(PeriodArray._datetimelike_methods) | _raw_methods @delegate_names(PeriodArray, PeriodDelegateMixin._delegated_properties, typ="property") @@ -262,10 +261,6 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): def values(self): return np.asarray(self) - @property - def freq(self) -> DateOffset: - return self._data.freq 
- def _shallow_copy(self, values=None, **kwargs): # TODO: simplify, figure out type of values if values is None: @@ -363,10 +358,6 @@ def _maybe_convert_timedelta(self, other): # ------------------------------------------------------------------------ # Rendering Methods - def _format_native_types(self, na_rep="NaT", quoting=None, **kwargs): - # just dispatch, return ndarray - return self._data._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) - def _mpl_repr(self): # how to represent ourselves to matplotlib return self.astype(object).values diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 8dd8bd8642354..795b4836b9a2a 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -41,18 +41,15 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): # We also have a few "extra" attrs, which may or may not be raw, # which we don't want to expose in the .dt accessor. _delegate_class = TimedeltaArray - _delegated_properties = TimedeltaArray._datetimelike_ops + ["components"] - _delegated_methods = TimedeltaArray._datetimelike_methods + [ - "_box_values", - "__neg__", - "__pos__", - "__abs__", - "sum", - "std", - "median", - ] - _raw_properties = {"components"} - _raw_methods = {"to_pytimedelta", "sum", "std", "median"} + _raw_properties = {"components", "_box_func"} + _raw_methods = {"to_pytimedelta", "sum", "std", "median", "_format_native_types"} + + _delegated_properties = TimedeltaArray._datetimelike_ops + list(_raw_properties) + _delegated_methods = ( + TimedeltaArray._datetimelike_methods + + list(_raw_methods) + + ["_box_values", "__neg__", "__pos__", "__abs__"] + ) @delegate_names( @@ -225,22 +222,9 @@ def _formatter_func(self): return _get_format_timedelta64(self, box=True) - def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): - from pandas.io.formats.format import Timedelta64Formatter - - return np.asarray( - Timedelta64Formatter( - values=self, 
nat_rep=na_rep, justify="all" - ).get_result() - ) - # ------------------------------------------------------------------- # Wrapping TimedeltaArray - @property - def _box_func(self): - return lambda x: Timedelta(x, unit="ns") - def __getitem__(self, key): result = self._data.__getitem__(key) if is_scalar(result):
https://api.github.com/repos/pandas-dev/pandas/pulls/30607
2020-01-01T23:43:35Z
2020-01-02T02:38:26Z
2020-01-02T02:38:26Z
2020-01-02T02:42:23Z
CLN: remove CategoricalIndex.itemsize, should have gone in #29918
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 53051baa8e67e..5f4bd801429a4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -302,7 +302,7 @@ class Categorical(ExtensionArray, PandasObject): __array_priority__ = 1000 _dtype = CategoricalDtype(ordered=False) # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = PandasObject._deprecations | frozenset(["tolist", "itemsize"]) + _deprecations = PandasObject._deprecations | frozenset(["tolist"]) _typ = "categorical" def __init__( diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 531014e4affec..5552efefa1c4c 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -361,11 +361,6 @@ def values(self): """ return the underlying data, which is a Categorical """ return self._data - @property - def itemsize(self): - # Size of the items in categories, not codes. - return self.values.itemsize - def _wrap_setop_result(self, other, result): name = get_op_result_name(self, other) return self._shallow_copy(result, name=name)
https://api.github.com/repos/pandas-dev/pandas/pulls/30606
2020-01-01T22:34:38Z
2020-01-02T01:02:23Z
2020-01-02T01:02:23Z
2020-01-02T01:44:41Z
REF: delegate attrs for CategoricalIndex, IntervalIndex
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 531014e4affec..827ceb04cb712 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -10,7 +10,7 @@ from pandas._typing import AnyArrayLike import pandas.compat as compat from pandas.compat.numpy import function as nv -from pandas.util._decorators import Appender, Substitution, cache_readonly +from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.common import ( ensure_platform_int, @@ -26,7 +26,6 @@ from pandas.core import accessor from pandas.core.algorithms import take_1d from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains -from pandas.core.base import _shared_docs import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name @@ -37,6 +36,12 @@ _index_doc_kwargs.update(dict(target_klass="CategoricalIndex")) +@accessor.delegate_names( + delegate=Categorical, + accessors=["codes", "categories", "ordered"], + typ="property", + overwrite=True, +) @accessor.delegate_names( delegate=Categorical, accessors=[ @@ -50,6 +55,12 @@ "as_unordered", "min", "max", + "is_dtype_equal", + "tolist", + "_internal_get_values", + "_reverse_indexer", + "searchsorted", + "argsort", ], typ="method", overwrite=True, @@ -147,6 +158,20 @@ class CategoricalIndex(Index, accessor.PandasDelegate): _typ = "categoricalindex" + _raw_inherit = { + "argsort", + "_internal_get_values", + "tolist", + "codes", + "categories", + "ordered", + "_reverse_indexer", + "searchsorted", + } + + codes: np.ndarray + categories: Index + @property def _engine_type(self): # self.codes can have dtype int8, int16, int32 or int64, so we need @@ -370,29 +395,6 @@ def _wrap_setop_result(self, other, result): name = get_op_result_name(self, other) return self._shallow_copy(result, name=name) - def _internal_get_values(self): - # override base Index 
version to get the numpy array representation of - # the underlying Categorical - return self._data._internal_get_values() - - def tolist(self): - return self._data.tolist() - - @property - def codes(self): - return self._data.codes - - @property - def categories(self): - return self._data.categories - - @property - def ordered(self): - return self._data.ordered - - def _reverse_indexer(self): - return self._data._reverse_indexer() - @Appender(_index_shared_docs["contains"] % _index_doc_kwargs) def __contains__(self, key) -> bool: # if key is a NaN, check if any NaN is in self. @@ -429,9 +431,6 @@ def fillna(self, value, downcast=None): self._assert_can_do_op(value) return CategoricalIndex(self._data.fillna(value), name=self.name) - def argsort(self, *args, **kwargs): - return self.values.argsort(*args, **kwargs) - @cache_readonly def _engine(self): # we are going to look things up with the codes themselves. @@ -539,11 +538,6 @@ def get_value(self, series: AnyArrayLike, key: Any): # we might be a positional inexer return super().get_value(series, key) - @Substitution(klass="CategoricalIndex") - @Appender(_shared_docs["searchsorted"]) - def searchsorted(self, value, side="left", sorter=None): - return self._data.searchsorted(value, side=side, sorter=sorter) - @Appender(_index_shared_docs["where"]) def where(self, cond, other=None): # TODO: Investigate an alternative implementation with @@ -746,9 +740,6 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): ) return self._create_from_codes(taken) - def is_dtype_equal(self, other): - return self._data.is_dtype_equal(other) - take_nd = take @Appender(_index_shared_docs["_maybe_cast_slice_bound"]) @@ -882,10 +873,6 @@ def _concat_same_dtype(self, to_concat, name): result.name = name return result - def _codes_for_groupby(self, sort, observed): - """ Return a Categorical adjusted for groupby """ - return self.values._codes_for_groupby(sort, observed) - @classmethod def 
_add_comparison_methods(cls): """ add in comparison methods """ @@ -911,13 +898,18 @@ def _evaluate_compare(self, other): cls.__le__ = _make_compare(operator.le) cls.__ge__ = _make_compare(operator.ge) + def _delegate_property_get(self, name, *args, **kwargs): + """ method delegation to the ._values """ + prop = getattr(self._values, name) + return prop # no wrapping for now + def _delegate_method(self, name, *args, **kwargs): """ method delegation to the ._values """ method = getattr(self._values, name) if "inplace" in kwargs: raise ValueError("cannot use inplace with CategoricalIndex") res = method(*args, **kwargs) - if is_scalar(res): + if is_scalar(res) or name in self._raw_inherit: return res return CategoricalIndex(res, name=self.name) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 52df491725504..abc82dd3c73f5 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -37,6 +37,7 @@ from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna +from pandas.core import accessor from pandas.core.algorithms import take_1d from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs import pandas.core.common as com @@ -181,7 +182,28 @@ def func(intvidx_self, other, sort=False): ), ) ) -class IntervalIndex(IntervalMixin, Index): +@accessor.delegate_names( + delegate=IntervalArray, + accessors=[ + "_ndarray_values", + "length", + "size", + "left", + "right", + "mid", + "closed", + "dtype", + ], + typ="property", + overwrite=True, +) +@accessor.delegate_names( + delegate=IntervalArray, + accessors=["__array__", "overlaps", "contains"], + typ="method", + overwrite=True, +) +class IntervalIndex(IntervalMixin, Index, accessor.PandasDelegate): _typ = "intervalindex" _comparables = ["name"] _attributes = ["name", "closed"] @@ -192,6 +214,8 @@ class IntervalIndex(IntervalMixin, Index): # Immutable, so we are able to cache computations like isna in '_mask' _mask 
= None + _raw_inherit = {"_ndarray_values", "__array__", "overlaps", "contains"} + # -------------------------------------------------------------------- # Constructors @@ -388,30 +412,6 @@ def to_tuples(self, na_tuple=True): def _multiindex(self): return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"]) - @property - def left(self): - """ - Return the left endpoints of each Interval in the IntervalIndex as - an Index. - """ - return self._data._left - - @property - def right(self): - """ - Return the right endpoints of each Interval in the IntervalIndex as - an Index. - """ - return self._data._right - - @property - def closed(self): - """ - Whether the intervals are closed on the left-side, right-side, both or - neither. - """ - return self._data._closed - @Appender( _interval_shared_docs["set_closed"] % dict( @@ -434,25 +434,8 @@ def closed(self): ) ) def set_closed(self, closed): - if closed not in _VALID_CLOSED: - raise ValueError(f"invalid option for 'closed': {closed}") - - # return self._shallow_copy(closed=closed) array = self._data.set_closed(closed) - return self._simple_new(array, self.name) - - @property - def length(self): - """ - Return an Index with entries denoting the length of each Interval in - the IntervalIndex. - """ - return self._data.length - - @property - def size(self): - # Avoid materializing ndarray[Interval] - return self._data.size + return self._simple_new(array, self.name) # TODO: can we use _shallow_copy? def __len__(self) -> int: return len(self.left) @@ -468,16 +451,6 @@ def values(self): def _values(self): return self._data - @cache_readonly - def _ndarray_values(self) -> np.ndarray: - return np.array(self._data) - - def __array__(self, result=None): - """ - The array interface, return my values. 
- """ - return self._ndarray_values - def __array_wrap__(self, result, context=None): # we don't want the superclass implementation return result @@ -506,13 +479,6 @@ def astype(self, dtype, copy=True): return self._shallow_copy(new_values.left, new_values.right) return super().astype(dtype, copy=copy) - @cache_readonly - def dtype(self): - """ - Return the dtype object of the underlying data. - """ - return self._data.dtype - @property def inferred_type(self) -> str: """Return a string of the type inferred from the values""" @@ -1177,44 +1143,6 @@ def equals(self, other) -> bool: and self.closed == other.closed ) - @Appender( - _interval_shared_docs["contains"] - % dict( - klass="IntervalIndex", - examples=textwrap.dedent( - """\ - >>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)]) - >>> intervals - IntervalIndex([(0, 1], (1, 3], (2, 4]], - closed='right', - dtype='interval[int64]') - >>> intervals.contains(0.5) - array([ True, False, False]) - """ - ), - ) - ) - def contains(self, other): - return self._data.contains(other) - - @Appender( - _interval_shared_docs["overlaps"] - % dict( - klass="IntervalIndex", - examples=textwrap.dedent( - """\ - >>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)]) - >>> intervals - IntervalIndex([(0, 1], (1, 3], (2, 4]], - closed='right', - dtype='interval[int64]') - """ - ), - ) - ) - def overlaps(self, other): - return self._data.overlaps(other) - @Appender(_index_shared_docs["intersection"]) @SetopCheck(op_name="intersection") def intersection( @@ -1314,6 +1242,19 @@ def is_all_dates(self) -> bool: # TODO: arithmetic operations + def _delegate_property_get(self, name, *args, **kwargs): + """ method delegation to the ._values """ + prop = getattr(self._data, name) + return prop # no wrapping for now + + def _delegate_method(self, name, *args, **kwargs): + """ method delegation to the ._data """ + method = getattr(self._data, name) + res = method(*args, **kwargs) + if is_scalar(res) or name 
in self._raw_inherit: + return res + return type(self)(res, name=self.name) + IntervalIndex._add_logical_methods_disabled()
Working towards creating an ExtensionIndexMixin to be shared by all our backed-be-EA indexes, which would ideally grow into ExtensionIndex. Removes CategoricalIndex._codes_for_groupby, which would raise AttributeError if it were ever called.
https://api.github.com/repos/pandas-dev/pandas/pulls/30605
2020-01-01T22:30:17Z
2020-01-02T13:55:53Z
2020-01-02T13:55:53Z
2020-01-02T16:52:59Z
CLN: Replace old format strings to f-strings in pandas/tests/base
diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index 4b6349a505509..4295d89869a72 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -288,7 +288,7 @@ def test_numpy_array_all_dtypes(any_numpy_dtype): def test_array(array, attr, index_or_series): box = index_or_series if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index: - pytest.skip("No index type for {}".format(array.dtype)) + pytest.skip(f"No index type for {array.dtype}") result = box(array, copy=False).array if attr: @@ -354,7 +354,7 @@ def test_to_numpy(array, expected, index_or_series): thing = box(array) if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index: - pytest.skip("No index type for {}".format(array.dtype)) + pytest.skip(f"No index type for {array.dtype}") result = thing.to_numpy() tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index 4231aa844f282..0d8c280d91256 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -62,8 +62,8 @@ def setup_method(self, method): self.unicode_series = Series(arr, index=self.unicode_index, name="a") types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"] - self.indexes = [getattr(self, "{}_index".format(t)) for t in types] - self.series = [getattr(self, "{}_series".format(t)) for t in types] + self.indexes = [getattr(self, f"{t}_index") for t in types] + self.series = [getattr(self, f"{t}_series") for t in types] # To test narrow dtypes, we use narrower *data* elements, not *index* elements index = self.int_index @@ -79,7 +79,7 @@ def setup_method(self, method): self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a") nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"] - self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types] + self.narrow_series = 
[getattr(self, f"{t}_series") for t in nrw_types] self.objs = self.indexes + self.series + self.narrow_series
- [x] Contributes to #29547 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30604
2020-01-01T19:46:24Z
2020-01-01T20:47:45Z
2020-01-01T20:47:45Z
2020-01-04T22:19:51Z
Added 'pearson' to methods list in pandas/core/nanops.py
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 584972f2b2dd5..6b03e76a1d691 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1243,8 +1243,14 @@ def nancorr(a, b, method="pearson", min_periods=None): def get_corr_func(method): if method in ["kendall", "spearman"]: from scipy.stats import kendalltau, spearmanr + elif method in ["pearson"]: + pass elif callable(method): return method + else: + raise ValueError( + f"Unkown method '{method}', expected one of 'kendall', 'spearman'" + ) def _pearson(a, b): return np.corrcoef(a, b)[0, 1] diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index b2bccbeb82c27..2c5d028ebe42e 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -598,6 +598,14 @@ def test_nancorr_spearman(self): targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman") + @td.skip_if_no_scipy + def test_invalid_method(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + msg = "Unkown method 'foo', expected one of 'kendall', 'spearman'" + with pytest.raises(ValueError, match=msg): + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo") + def test_nancov(self): targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
- [x] ref https://github.com/pandas-dev/pandas/pull/30461#discussion_r362323260 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30603
2020-01-01T19:32:52Z
2020-01-07T23:17:53Z
2020-01-07T23:17:53Z
2020-01-08T20:30:05Z
TST: Add test for TypeError when using datetime.time in scatter plot
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index a9ab9d84dbc2f..c2a289b2772ba 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1162,6 +1162,15 @@ def test_plot_scatter(self): axes = df.plot(x="x", y="y", kind="scatter", subplots=True) self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + def test_raise_error_on_datetime_time_data(self): + # GH 8113, datetime.time type is not supported by matplotlib in scatter + df = pd.DataFrame(np.random.randn(10), columns=["a"]) + df["dtime"] = pd.date_range(start="2014-01-01", freq="h", periods=10).time + msg = "must be a string or a number, not 'datetime.time'" + + with pytest.raises(TypeError, match=msg): + df.plot(kind="scatter", x="dtime", y="a") + def test_scatterplot_datetime_data(self): # GH 30391 dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
- [ ] closes #8113 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30602
2020-01-01T15:52:57Z
2020-01-01T16:26:02Z
2020-01-01T16:26:02Z
2020-01-01T16:26:05Z
CLN: Use fstring instead of .format in io/excel and test/generic
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 553334407d12e..fe13fce83161d 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -297,9 +297,7 @@ def read_excel( for arg in ("sheet", "sheetname", "parse_cols"): if arg in kwds: - raise TypeError( - "read_excel() got an unexpected keyword argument `{}`".format(arg) - ) + raise TypeError(f"read_excel() got an unexpected keyword argument `{arg}`") if not isinstance(io, ExcelFile): io = ExcelFile(io, engine=engine) @@ -429,7 +427,7 @@ def parse( for asheetname in sheets: if verbose: - print("Reading sheet {sheet}".format(sheet=asheetname)) + print(f"Reading sheet {asheetname}") if isinstance(asheetname, str): sheet = self.get_sheet_by_name(asheetname) @@ -622,11 +620,11 @@ def __new__(cls, path, engine=None, **kwargs): ext = "xlsx" try: - engine = config.get_option("io.excel.{ext}.writer".format(ext=ext)) + engine = config.get_option(f"io.excel.{ext}.writer") if engine == "auto": engine = _get_default_writer(ext) except KeyError: - raise ValueError("No engine for filetype: '{ext}'".format(ext=ext)) + raise ValueError(f"No engine for filetype: '{ext}'") cls = get_writer(engine) return object.__new__(cls) @@ -757,9 +755,8 @@ def check_extension(cls, ext): if ext.startswith("."): ext = ext[1:] if not any(ext in extension for extension in cls.supported_extensions): - msg = "Invalid extension for engine '{engine}': '{ext}'".format( - engine=pprint_thing(cls.engine), ext=pprint_thing(ext) - ) + msg = "Invalid extension for engine" + f"'{pprint_thing(cls.engine)}': '{pprint_thing(ext)}'" raise ValueError(msg) else: return True @@ -802,7 +799,7 @@ def __init__(self, io, engine=None): if engine is None: engine = "xlrd" if engine not in self._engines: - raise ValueError("Unknown engine: {engine}".format(engine=engine)) + raise ValueError(f"Unknown engine: {engine}") self.engine = engine # could be a str, ExcelFile, Book, etc. 
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 6b9943136664a..2afb41e7bdc7e 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -178,4 +178,4 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar: elif cell_type == "time": return pd.to_datetime(str(cell)).time() else: - raise ValueError("Unrecognized type {}".format(cell_type)) + raise ValueError(f"Unrecognized type {cell_type}") diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 7a264ed2b0850..be52523e486af 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -99,7 +99,7 @@ def _convert_to_style_kwargs(cls, style_dict): for k, v in style_dict.items(): if k in _style_key_map: k = _style_key_map[k] - _conv_to_x = getattr(cls, "_convert_to_{k}".format(k=k), lambda x: None) + _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None) new_v = _conv_to_x(v) if new_v: style_kwargs[k] = new_v diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index ee617d2013136..8cd4b2012cb42 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -48,7 +48,7 @@ def get_writer(engine_name): try: return _writers[engine_name] except KeyError: - raise ValueError("No Excel writer '{engine}'".format(engine=engine_name)) + raise ValueError(f"No Excel writer '{engine_name}'") def _excel2num(x): @@ -76,7 +76,7 @@ def _excel2num(x): cp = ord(c) if cp < ord("A") or cp > ord("Z"): - raise ValueError("Invalid column name: {x}".format(x=x)) + raise ValueError(f"Invalid column name: {x}") index = index * 26 + cp - ord("A") + 1 diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index 996ae1caa14c8..d102a885cef0a 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -97,20 +97,20 @@ def _style_to_xlwt( if hasattr(item, "items"): if firstlevel: it = [ - "{key}: {val}".format(key=key, val=cls._style_to_xlwt(value, False)) + f"{key}: {cls._style_to_xlwt(value, 
False)}" for key, value in item.items() ] - out = "{sep} ".format(sep=(line_sep).join(it)) + out = f"{(line_sep).join(it)} " return out else: it = [ - "{key} {val}".format(key=key, val=cls._style_to_xlwt(value, False)) + f"{key} {cls._style_to_xlwt(value, False)}" for key, value in item.items() ] - out = "{sep} ".format(sep=(field_sep).join(it)) + out = f"{(field_sep).join(it)} " return out else: - item = "{item}".format(item=item) + item = f"{item}" item = item.replace("True", "on") item = item.replace("False", "off") return item diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index 270a7c70a2e81..54c7e450c5cd6 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -196,7 +196,7 @@ def test_set_attribute(self): def test_to_xarray_index_types(self, index): from xarray import Dataset - index = getattr(tm, "make{}".format(index)) + index = getattr(tm, f"make{index}") df = DataFrame( { "a": list("abc"), diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 0ff9d7fcdb209..d0c9b3e7a8f76 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -125,7 +125,7 @@ def test_nonzero(self): # GH 4633 # look at the boolean/nonzero behavior for objects obj = self._construct(shape=4) - msg = "The truth value of a {} is ambiguous".format(self._typ.__name__) + msg = f"The truth value of a {self._typ.__name__} is ambiguous" with pytest.raises(ValueError, match=msg): bool(obj == 0) with pytest.raises(ValueError, match=msg): @@ -203,9 +203,9 @@ def test_constructor_compound_dtypes(self): def f(dtype): return self._construct(shape=3, value=1, dtype=dtype) - msg = "compound dtypes are not implemented in the {} constructor".format( - self._typ.__name__ - ) + msg = "compound dtypes are not implemented" + f"in the {self._typ.__name__} constructor" + with pytest.raises(NotImplementedError, match=msg): f([("A", "datetime64[h]"), ("B", 
"str"), ("C", "int32")]) diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py index aaf523956aaed..601fc2aa64434 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -205,7 +205,7 @@ def finalize(self, other, method=None, **kwargs): def test_to_xarray_index_types(self, index): from xarray import DataArray - index = getattr(tm, "make{}".format(index)) + index = getattr(tm, f"make{index}") s = Series(range(6), index=index(6)) s.index.name = "foo" result = s.to_xarray()
- [x] Contributes to #29547 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30601
2020-01-01T14:25:47Z
2020-01-01T16:07:12Z
2020-01-01T16:07:11Z
2020-01-01T16:07:15Z
BUG: Ensure df.itertuples() uses plain tuples correctly
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index b194f20c3c433..5af426d07de14 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -987,6 +987,7 @@ Other - Bug in :class:`Index` where a non-hashable name could be set without raising ``TypeError`` (:issue:`29069`) - Bug in :class:`DataFrame` constructor when passing a 2D ``ndarray`` and an extension dtype (:issue:`12513`) - Bug in :meth:`DaataFrame.to_csv` when supplied a series with a ``dtype="string"`` and a ``na_rep``, the ``na_rep`` was being truncated to 2 characters. (:issue:`29975`) +- Bug where :meth:`DataFrame.itertuples` would incorrectly determine whether or not namedtuples could be used for dataframes of 255 columns (:issue:`28282`) .. _whatsnew_1000.contributors: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d4676a998c948..b69199defbcc4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -38,6 +38,7 @@ from pandas._libs import algos as libalgos, lib from pandas._typing import Axes, Dtype, FilePathOrBuffer +from pandas.compat import PY37 from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import ( @@ -975,7 +976,8 @@ def itertuples(self, index=True, name="Pandas"): ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. - With a large number of columns (>255), regular tuples are returned. + On python versions < 3.7 regular tuples are returned for DataFrames + with a large number of columns (>254). 
Examples -------- @@ -1018,8 +1020,9 @@ def itertuples(self, index=True, name="Pandas"): # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) - # Python 3 supports at most 255 arguments to constructor - if name is not None and len(self.columns) + index < 256: + # Python versions before 3.7 support at most 255 arguments to constructors + can_return_named_tuples = PY37 or len(self.columns) + index < 255 + if name is not None and can_return_named_tuples: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 91fb71c9de7a4..f6713d703e112 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -5,6 +5,8 @@ import numpy as np import pytest +from pandas.compat import PY37 + import pandas as pd from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range import pandas.util.testing as tm @@ -261,8 +263,27 @@ def test_itertuples(self, float_frame): df3 = DataFrame({"f" + str(i): [i] for i in range(1024)}) # will raise SyntaxError if trying to create namedtuple tup3 = next(df3.itertuples()) - assert not hasattr(tup3, "_fields") assert isinstance(tup3, tuple) + if PY37: + assert hasattr(tup3, "_fields") + else: + assert not hasattr(tup3, "_fields") + + # GH 28282 + df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}]) + result_254_columns = next(df_254_columns.itertuples(index=False)) + assert isinstance(result_254_columns, tuple) + assert hasattr(result_254_columns, "_fields") + + df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}]) + result_255_columns = next(df_255_columns.itertuples(index=False)) + assert isinstance(result_255_columns, tuple) + + # Dataframes with >=255 columns will fallback to regular tuples on python < 3.7 + if PY37: + assert hasattr(result_255_columns, 
"_fields") + else: + assert not hasattr(result_255_columns, "_fields") def test_sequence_like_with_categorical(self):
Currently DataFrame.itertuples() has an off by one error when it inspects whether or not it should return namedtuples or plain tuples in it's response. This PR addresses that bug by correcting the condition that is used when making the check. Closes: #28282 - [x] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30600
2020-01-01T11:53:53Z
2020-01-02T00:58:16Z
2020-01-02T00:58:16Z
2020-01-06T09:15:27Z
REF: share _wrap_joined_index
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 6a49f9f670aab..f957860240dd2 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -35,6 +35,7 @@ import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.numeric import Int64Index +from pandas.core.ops import get_op_result_name from pandas.core.tools.timedeltas import to_timedelta from pandas.tseries.frequencies import DateOffset, to_offset @@ -923,6 +924,22 @@ def _is_convertible_to_index_for_join(cls, other: Index) -> bool: return True return False + def _wrap_joined_index(self, joined, other): + name = get_op_result_name(self, other) + if ( + isinstance(other, type(self)) + and self.freq == other.freq + and self._can_fast_union(other) + ): + joined = self._shallow_copy(joined) + joined.name = name + return joined + else: + kwargs = {} + if hasattr(self, "tz"): + kwargs["tz"] = getattr(other, "tz", None) + return self._simple_new(joined, name, **kwargs) + def wrap_arithmetic_op(self, other, result): if result is NotImplemented: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index fafa9e95a5963..f8e8a7037b9c4 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -632,20 +632,6 @@ def snap(self, freq="S"): # we know it conforms; skip check return DatetimeIndex._simple_new(snapped, name=self.name, tz=self.tz, freq=freq) - def _wrap_joined_index(self, joined, other): - name = get_op_result_name(self, other) - if ( - isinstance(other, DatetimeIndex) - and self.freq == other.freq - and self._can_fast_union(other) - ): - joined = self._shallow_copy(joined) - joined.name = name - return joined - else: - tz = getattr(other, "tz", None) - return self._simple_new(joined, name, tz=tz) - def _parsed_string_to_bounds(self, reso, parsed): """ Calculate datetime bounds for parsed time string and its 
resolution. diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index e6790d092778f..8dd8bd8642354 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -31,7 +31,6 @@ DatetimelikeDelegateMixin, DatetimeTimedeltaMixin, ) -from pandas.core.ops import get_op_result_name from pandas.tseries.frequencies import to_offset @@ -283,18 +282,6 @@ def _union(self, other, sort): result._set_freq("infer") return result - def _wrap_joined_index(self, joined, other): - name = get_op_result_name(self, other) - if ( - isinstance(other, TimedeltaIndex) - and self.freq == other.freq - and self._can_fast_union(other) - ): - joined = self._shallow_copy(joined, name=name) - return joined - else: - return self._simple_new(joined, name) - def _fast_union(self, other): if len(other) == 0: return self.view(type(self))
this is the last of the trivial ones AFAICT. To get the others we're going to have to smooth out small differences in behavior, which should happen in non-refactoring PRs.
https://api.github.com/repos/pandas-dev/pandas/pulls/30599
2020-01-01T05:12:17Z
2020-01-01T16:03:07Z
2020-01-01T16:03:07Z
2020-01-01T16:32:50Z
CLN: Remove int32 and float32 dtypes from IntervalTree
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index 316c9e5b7e5f0..d09413bfa5210 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -8,14 +8,11 @@ from pandas._libs.algos import is_monotonic ctypedef fused int_scalar_t: int64_t - int32_t float64_t - float32_t ctypedef fused uint_scalar_t: uint64_t float64_t - float32_t ctypedef fused scalar_t: int_scalar_t @@ -212,7 +209,7 @@ cdef sort_values_and_indices(all_values, all_indices, subset): {{py: nodes = [] -for dtype in ['float32', 'float64', 'int32', 'int64', 'uint64']: +for dtype in ['float64', 'int64', 'uint64']: for closed, cmp_left, cmp_right in [ ('left', '<=', '<'), ('right', '<', '<='), diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 695a98777eadb..0a92192ee6a0f 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -20,9 +20,7 @@ def skipif_32bit(param): return pytest.param(param, marks=marks) -@pytest.fixture( - scope="class", params=["int32", "int64", "float32", "float64", "uint64"] -) +@pytest.fixture(scope="class", params=["int64", "float64", "uint64"]) def dtype(request): return request.param @@ -39,12 +37,9 @@ def leaf_size(request): @pytest.fixture( params=[ np.arange(5, dtype="int64"), - np.arange(5, dtype="int32"), np.arange(5, dtype="uint64"), np.arange(5, dtype="float64"), - np.arange(5, dtype="float32"), np.array([0, 1, 2, 3, 4, np.nan], dtype="float64"), - np.array([0, 1, 2, 3, 4, np.nan], dtype="float32"), ] ) def tree(request, leaf_size): @@ -64,13 +59,14 @@ def test_get_indexer(self, tree): tree.get_indexer(np.array([3.0])) @pytest.mark.parametrize( - "dtype, target_value", [("int64", 2 ** 63 + 1), ("uint64", -1)] + "dtype, target_value, target_dtype", + [("int64", 2 ** 63 + 1, "uint64"), ("uint64", -1, "int64")], ) - def test_get_indexer_overflow(self, dtype, target_value): + 
def test_get_indexer_overflow(self, dtype, target_value, target_dtype): left, right = np.array([0, 1], dtype=dtype), np.array([1, 2], dtype=dtype) tree = IntervalTree(left, right) - result = tree.get_indexer(np.array([target_value])) + result = tree.get_indexer(np.array([target_value], dtype=target_dtype)) expected = np.array([-1], dtype="intp") tm.assert_numpy_array_equal(result, expected) @@ -94,12 +90,13 @@ def test_get_indexer_non_unique(self, tree): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( - "dtype, target_value", [("int64", 2 ** 63 + 1), ("uint64", -1)] + "dtype, target_value, target_dtype", + [("int64", 2 ** 63 + 1, "uint64"), ("uint64", -1, "int64")], ) - def test_get_indexer_non_unique_overflow(self, dtype, target_value): + def test_get_indexer_non_unique_overflow(self, dtype, target_value, target_dtype): left, right = np.array([0, 2], dtype=dtype), np.array([1, 3], dtype=dtype) tree = IntervalTree(left, right) - target = np.array([target_value]) + target = np.array([target_value], dtype=target_dtype) result_indexer, result_missing = tree.get_indexer_non_unique(target) expected_indexer = np.array([-1], dtype="intp") @@ -146,10 +143,10 @@ def test_get_indexer_closed(self, closed, leaf_size): @pytest.mark.parametrize( "left, right, expected", [ - (np.array([0, 1, 4]), np.array([2, 3, 5]), True), - (np.array([0, 1, 2]), np.array([5, 4, 3]), True), + (np.array([0, 1, 4], dtype="int64"), np.array([2, 3, 5]), True), + (np.array([0, 1, 2], dtype="int64"), np.array([5, 4, 3]), True), (np.array([0, 1, np.nan]), np.array([5, 4, np.nan]), True), - (np.array([0, 2, 4]), np.array([1, 3, 5]), False), + (np.array([0, 2, 4], dtype="int64"), np.array([1, 3, 5]), False), (np.array([0, 2, np.nan]), np.array([1, 3, np.nan]), False), ], ) @@ -164,7 +161,7 @@ def test_is_overlapping(self, closed, order, left, right, expected): def test_is_overlapping_endpoints(self, closed, order): """shared endpoints are marked as overlapping""" # GH 23309 - 
left, right = np.arange(3), np.arange(1, 4) + left, right = np.arange(3, dtype="int64"), np.arange(1, 4) tree = IntervalTree(left[order], right[order], closed=closed) result = tree.is_overlapping expected = closed == "both" @@ -187,7 +184,7 @@ def test_is_overlapping_trivial(self, closed, left, right): @pytest.mark.skipif(compat.is_platform_32bit(), reason="GH 23440") def test_construction_overflow(self): # GH 25485 - left, right = np.arange(101), [np.iinfo(np.int64).max] * 101 + left, right = np.arange(101, dtype="int64"), [np.iinfo(np.int64).max] * 101 tree = IntervalTree(left, right) # pivot should be average of left/right medians
There isn't a practical way to actually get an `IntervalTree` with `int32`/`float32` dtypes since `IntervalIndex` is backed by two pandas indexes and we don't have an `Int32Index` or `Float32Index`. These indexes are used to create the underlying `IntervalTree` that backs an `IntervalIndex`, so we're guaranteed to have `int64`/`float64` dtype data when initializing. The only way that comes to mind that a user could create an `IntervalTree` with `int32`/`float32` dtype would be by explicitly initializing a standalone `IntervalTree` with `int32`/`float32` arrays, which doesn't seem particularly likely. This should also help with build times as it results in 8 fewer node classes being generated. Didn't add a whatsnew note since I don't think `IntervalTree` is user-facing (could be wrong?) but can add one if desired since this is technically a breaking change.
https://api.github.com/repos/pandas-dev/pandas/pulls/30598
2020-01-01T03:43:53Z
2020-01-01T18:19:08Z
2020-01-01T18:19:08Z
2020-01-01T21:14:22Z
REF: move inference/casting out of Index.__new__
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ec26e46a71e91..e9e3a5ef94a1f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -370,43 +370,12 @@ def __new__( subarr = subarr.copy() if dtype is None: - inferred = lib.infer_dtype(subarr, skipna=False) - if inferred == "integer": - try: - return cls._try_convert_to_int_index(subarr, copy, name, dtype) - except ValueError: - pass - - return Index(subarr, copy=copy, dtype=object, name=name) - elif inferred in ["floating", "mixed-integer-float", "integer-na"]: - # TODO: Returns IntegerArray for integer-na case in the future - return Float64Index(subarr, copy=copy, name=name) - elif inferred == "interval": - try: - return IntervalIndex(subarr, name=name, copy=copy) - except ValueError: - # GH27172: mixed closed Intervals --> object dtype - pass - elif inferred == "boolean": - # don't support boolean explicitly ATM - pass - elif inferred != "string": - if inferred.startswith("datetime"): - try: - return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) - except (ValueError, OutOfBoundsDatetime): - # GH 27011 - # If we have mixed timezones, just send it - # down the base constructor - pass - - elif inferred.startswith("timedelta"): - return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs) - elif inferred == "period": - try: - return PeriodIndex(subarr, name=name, **kwargs) - except IncompatibleFrequency: - pass + new_data, new_dtype = _maybe_cast_data_without_dtype(subarr) + if new_dtype is not None: + return cls( + new_data, dtype=new_dtype, copy=False, name=name, **kwargs + ) + if kwargs: raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}") return cls._simple_new(subarr, name, **kwargs) @@ -3806,50 +3775,6 @@ def where(self, cond, other=None): return self._shallow_copy_with_infer(values, dtype=dtype) # construction helpers - @classmethod - def _try_convert_to_int_index(cls, data, copy, name, dtype): - """ - Attempt to convert an array of 
data into an integer index. - - Parameters - ---------- - data : The data to convert. - copy : Whether to copy the data or not. - name : The name of the index returned. - - Returns - ------- - int_index : data converted to either an Int64Index or a - UInt64Index - - Raises - ------ - ValueError if the conversion was not successful. - """ - - from .numeric import Int64Index, UInt64Index - - if not is_unsigned_integer_dtype(dtype): - # skip int64 conversion attempt if uint-like dtype is passed, as - # this could return Int64Index when UInt64Index is what's desired - try: - res = data.astype("i8", copy=False) - if (res == data).all(): - return Int64Index(res, copy=copy, name=name) - except (OverflowError, TypeError, ValueError): - pass - - # Conversion to int64 failed (possibly due to overflow) or was skipped, - # so let's try now with uint64. - try: - res = data.astype("u8", copy=False) - if (res == data).all(): - return UInt64Index(res, copy=copy, name=name) - except (OverflowError, TypeError, ValueError): - pass - - raise ValueError - @classmethod def _scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor @@ -5509,6 +5434,77 @@ def _maybe_cast_with_dtype(data: np.ndarray, dtype: np.dtype, copy: bool) -> np. return data +def _maybe_cast_data_without_dtype(subarr): + """ + If we have an arraylike input but no passed dtype, try to infer + a supported dtype. 
+ + Parameters + ---------- + subarr : np.ndarray, Index, or Series + + Returns + ------- + converted : np.ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + """ + # Runtime import needed bc IntervalArray imports Index + from pandas.core.arrays import ( + IntervalArray, + PeriodArray, + DatetimeArray, + TimedeltaArray, + ) + + inferred = lib.infer_dtype(subarr, skipna=False) + + if inferred == "integer": + try: + data = _try_convert_to_int_array(subarr, False, None) + return data, data.dtype + except ValueError: + pass + + return subarr, object + + elif inferred in ["floating", "mixed-integer-float", "integer-na"]: + # TODO: Returns IntegerArray for integer-na case in the future + return subarr, np.float64 + + elif inferred == "interval": + try: + data = IntervalArray._from_sequence(subarr, copy=False) + return data, data.dtype + except ValueError: + # GH27172: mixed closed Intervals --> object dtype + pass + elif inferred == "boolean": + # don't support boolean explicitly ATM + pass + elif inferred != "string": + if inferred.startswith("datetime"): + try: + data = DatetimeArray._from_sequence(subarr, copy=False) + return data, data.dtype + except (ValueError, OutOfBoundsDatetime): + # GH 27011 + # If we have mixed timezones, just send it + # down the base constructor + pass + + elif inferred.startswith("timedelta"): + data = TimedeltaArray._from_sequence(subarr, copy=False) + return data, data.dtype + elif inferred == "period": + try: + data = PeriodArray._from_sequence(subarr) + return data, data.dtype + except IncompatibleFrequency: + pass + + return subarr, subarr.dtype + + def _try_convert_to_int_array( data: np.ndarray, copy: bool, dtype: np.dtype ) -> np.ndarray:
The new function operates only on arrays, not Indexes. This gets us close to being able to just use `pd.array` here.
https://api.github.com/repos/pandas-dev/pandas/pulls/30596
2020-01-01T03:38:59Z
2020-01-01T05:26:01Z
2020-01-01T05:26:00Z
2020-01-03T02:48:19Z
REF: share join methods for DTI/TDI
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 76814403af385..6a49f9f670aab 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -6,8 +6,9 @@ import numpy as np -from pandas._libs import NaT, iNaT, lib +from pandas._libs import NaT, iNaT, join as libjoin, lib from pandas._libs.algos import unique_deltas +from pandas._libs.tslibs import timezones from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly @@ -72,6 +73,32 @@ def method(self, other): return method +def _join_i8_wrapper(joinf, with_indexers: bool = True): + """ + Create the join wrapper methods. + """ + + @staticmethod # type: ignore + def wrapper(left, right): + if isinstance(left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)): + left = left.view("i8") + if isinstance(right, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)): + right = right.view("i8") + + results = joinf(left, right) + if with_indexers: + # dtype should be timedelta64[ns] for TimedeltaIndex + # and datetime64[ns] for DatetimeIndex + dtype = left.dtype.base + + join_index, left_indexer, right_indexer = results + join_index = join_index.view(dtype) + return join_index, left_indexer, right_indexer + return results + + return wrapper + + class DatetimeIndexOpsMixin(ExtensionOpsMixin): """ Common ops mixin to support a unified interface datetimelike Index. @@ -208,32 +235,6 @@ def equals(self, other): return np.array_equal(self.asi8, other.asi8) - @staticmethod - def _join_i8_wrapper(joinf, dtype, with_indexers=True): - """ - Create the join wrapper methods. 
- """ - from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin - - @staticmethod - def wrapper(left, right): - if isinstance( - left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin) - ): - left = left.view("i8") - if isinstance( - right, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin) - ): - right = right.view("i8") - results = joinf(left, right) - if with_indexers: - join_index, left_indexer, right_indexer = results - join_index = join_index.view(dtype) - return join_index, left_indexer, right_indexer - return results - - return wrapper - def _ensure_localized( self, arg, ambiguous="raise", nonexistent="raise", from_utc=False ): @@ -853,6 +854,75 @@ def _can_fast_union(self, other) -> bool: # this will raise return False + # -------------------------------------------------------------------- + # Join Methods + _join_precedence = 10 + + _inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer) + _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer) + _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer) + _left_indexer_unique = _join_i8_wrapper( + libjoin.left_join_indexer_unique, with_indexers=False + ) + + def join( + self, other, how: str = "left", level=None, return_indexers=False, sort=False + ): + """ + See Index.join + """ + if self._is_convertible_to_index_for_join(other): + try: + other = type(self)(other) + except (TypeError, ValueError): + pass + + this, other = self._maybe_utc_convert(other) + return Index.join( + this, + other, + how=how, + level=level, + return_indexers=return_indexers, + sort=sort, + ) + + def _maybe_utc_convert(self, other): + this = self + if not hasattr(self, "tz"): + return this, other + + if isinstance(other, type(self)): + if self.tz is not None: + if other.tz is None: + raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") + elif other.tz is not None: + raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") + + if not timezones.tz_compare(self.tz, 
other.tz): + this = self.tz_convert("UTC") + other = other.tz_convert("UTC") + return this, other + + @classmethod + def _is_convertible_to_index_for_join(cls, other: Index) -> bool: + """ + return a boolean whether I can attempt conversion to a + DatetimeIndex/TimedeltaIndex + """ + if isinstance(other, cls): + return False + elif len(other) > 0 and other.inferred_type not in ( + "floating", + "mixed-integer", + "integer", + "integer-na", + "mixed-integer-float", + "mixed", + ): + return True + return False + def wrap_arithmetic_op(self, other, result): if result is NotImplemented: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 53d2ed22cd631..fafa9e95a5963 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -5,7 +5,6 @@ import numpy as np from pandas._libs import NaT, Timestamp, index as libindex, lib, tslib as libts -import pandas._libs.join as libjoin from pandas._libs.tslibs import ccalendar, fields, parsing, timezones from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -32,7 +31,6 @@ import pandas.core.common as com from pandas.core.indexes.base import Index, maybe_extract_name from pandas.core.indexes.datetimelike import ( - DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, DatetimeTimedeltaMixin, ) @@ -195,17 +193,6 @@ class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin): """ _typ = "datetimeindex" - _join_precedence = 10 - - def _join_i8_wrapper(joinf, **kwargs): - return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype="M8[ns]", **kwargs) - - _inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer) - _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer) - _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer) - _left_indexer_unique = _join_i8_wrapper( - libjoin.left_join_indexer_unique, with_indexers=False - ) _engine_type = libindex.DatetimeEngine _supports_partial_string_indexing = True @@ -645,54 +632,6 @@ def 
snap(self, freq="S"): # we know it conforms; skip check return DatetimeIndex._simple_new(snapped, name=self.name, tz=self.tz, freq=freq) - def join( - self, other, how: str = "left", level=None, return_indexers=False, sort=False - ): - """ - See Index.join - """ - if ( - not isinstance(other, DatetimeIndex) - and len(other) > 0 - and other.inferred_type - not in ( - "floating", - "integer", - "integer-na", - "mixed-integer", - "mixed-integer-float", - "mixed", - ) - ): - try: - other = DatetimeIndex(other) - except (TypeError, ValueError): - pass - - this, other = self._maybe_utc_convert(other) - return Index.join( - this, - other, - how=how, - level=level, - return_indexers=return_indexers, - sort=sort, - ) - - def _maybe_utc_convert(self, other): - this = self - if isinstance(other, DatetimeIndex): - if self.tz is not None: - if other.tz is None: - raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") - elif other.tz is not None: - raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") - - if not timezones.tz_compare(self.tz, other.tz): - this = self.tz_convert("UTC") - other = other.tz_convert("UTC") - return this, other - def _wrap_joined_index(self, joined, other): name = get_op_result_name(self, other) if ( diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 65c3ece6000fc..e6790d092778f 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -3,7 +3,7 @@ import numpy as np -from pandas._libs import NaT, Timedelta, index as libindex, join as libjoin, lib +from pandas._libs import NaT, Timedelta, index as libindex, lib from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.common import ( @@ -121,17 +121,6 @@ class TimedeltaIndex( """ _typ = "timedeltaindex" - _join_precedence = 10 - - def _join_i8_wrapper(joinf, **kwargs): - return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype="m8[ns]", **kwargs) - - _inner_indexer = 
_join_i8_wrapper(libjoin.inner_join_indexer) - _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer) - _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer) - _left_indexer_unique = _join_i8_wrapper( - libjoin.left_join_indexer_unique, with_indexers=False - ) _engine_type = libindex.TimedeltaEngine @@ -294,25 +283,6 @@ def _union(self, other, sort): result._set_freq("infer") return result - def join(self, other, how="left", level=None, return_indexers=False, sort=False): - """ - See Index.join - """ - if _is_convertible_to_index(other): - try: - other = TimedeltaIndex(other) - except (TypeError, ValueError): - pass - - return Index.join( - self, - other, - how=how, - level=level, - return_indexers=return_indexers, - sort=sort, - ) - def _wrap_joined_index(self, joined, other): name = get_op_result_name(self, other) if ( @@ -569,24 +539,6 @@ def delete(self, loc): TimedeltaIndex._add_datetimelike_methods() -def _is_convertible_to_index(other) -> bool: - """ - return a boolean whether I can attempt conversion to a TimedeltaIndex - """ - if isinstance(other, TimedeltaIndex): - return True - elif len(other) > 0 and other.inferred_type not in ( - "floating", - "mixed-integer", - "integer", - "integer-na", - "mixed-integer-float", - "mixed", - ): - return True - return False - - def timedelta_range( start=None, end=None, periods=None, freq=None, name=None, closed=None ) -> TimedeltaIndex:
The only actual changed behavior is in the new `_is_convertible_to_index_for_join` method where it first checks: ``` if isinstance(other, cls): return False ``` In master, `TimedeltaIndex` uses `_is_convertible_to_index`, which returns `True` here. The DTI method behavior is unchanged.
https://api.github.com/repos/pandas-dev/pandas/pulls/30595
2020-01-01T02:59:51Z
2020-01-01T04:28:06Z
2020-01-01T04:28:06Z
2020-01-01T04:37:15Z
BUG: DTA/TDA/PA add/sub object-dtype
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 610fe4afcc9a0..763a6fe560283 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -27,7 +27,6 @@ is_integer_dtype, is_list_like, is_object_dtype, - is_offsetlike, is_period_dtype, is_string_dtype, is_timedelta64_dtype, @@ -1075,8 +1074,6 @@ def _sub_period_array(self, other): f"cannot subtract {other.dtype}-dtype from {type(self).__name__}" ) - if len(self) != len(other): - raise ValueError("cannot subtract arrays/indices of unequal length") if self.freq != other.freq: msg = DIFFERENT_FREQ.format( cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr @@ -1093,14 +1090,13 @@ def _sub_period_array(self, other): new_values[mask] = NaT return new_values - def _addsub_offset_array(self, other, op): + def _addsub_object_array(self, other: np.ndarray, op): """ Add or subtract array-like of DateOffset objects Parameters ---------- - other : Index, np.ndarray - object-dtype containing pd.DateOffset objects + other : np.ndarray[object] op : {operator.add, operator.sub} Returns @@ -1124,7 +1120,12 @@ def _addsub_offset_array(self, other, op): kwargs = {} if not is_period_dtype(self): kwargs["freq"] = "infer" - return self._from_sequence(res_values, **kwargs) + try: + res = type(self)._from_sequence(res_values, **kwargs) + except ValueError: + # e.g. we've passed a Timestamp to TimedeltaArray + res = res_values + return res def _time_shift(self, periods, freq=None): """ @@ -1187,9 +1188,9 @@ def __add__(self, other): elif is_timedelta64_dtype(other): # TimedeltaIndex, ndarray[timedelta64] result = self._add_delta(other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - result = self._addsub_offset_array(other, operator.add) + elif is_object_dtype(other): + # e.g. 
Array/Index of DateOffset objects + result = self._addsub_object_array(other, operator.add) elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): # DatetimeIndex, ndarray[datetime64] return self._add_datetime_arraylike(other) @@ -1242,9 +1243,9 @@ def __sub__(self, other): elif is_timedelta64_dtype(other): # TimedeltaIndex, ndarray[timedelta64] result = self._add_delta(-other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - result = self._addsub_offset_array(other, operator.sub) + elif is_object_dtype(other): + # e.g. Array/Index of DateOffset objects + result = self._addsub_object_array(other, operator.sub) elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): # DatetimeIndex, ndarray[datetime64] result = self._sub_datetime_arraylike(other) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index df9a0c418f9ae..11f4131df62a6 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -510,13 +510,13 @@ def _add_datetimelike_scalar(self, other): dtype = DatetimeTZDtype(tz=other.tz) if other.tz else _NS_DTYPE return DatetimeArray(result, dtype=dtype, freq=self.freq) - def _addsub_offset_array(self, other, op): - # Add or subtract Array-like of DateOffset objects + def _addsub_object_array(self, other, op): + # Add or subtract Array-like of objects try: # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. 
Incompatible classes will raise AttributeError, # which we re-raise as TypeError - return super()._addsub_offset_array(other, op) + return super()._addsub_object_array(other, op) except AttributeError: raise TypeError( f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}" diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 1fae25de45423..8fc8b8300d21c 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -18,7 +18,6 @@ ) from pandas.core.dtypes.generic import ( ABCCategorical, - ABCDateOffset, ABCDatetimeIndex, ABCIndexClass, ABCPeriodArray, @@ -368,37 +367,6 @@ def is_categorical(arr) -> bool: return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) -def is_offsetlike(arr_or_obj) -> bool: - """ - Check if obj or all elements of list-like is DateOffset - - Parameters - ---------- - arr_or_obj : object - - Returns - ------- - boolean - Whether the object is a DateOffset or listlike of DatetOffsets - - Examples - -------- - >>> is_offsetlike(pd.DateOffset(days=1)) - True - >>> is_offsetlike('offset') - False - >>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()]) - True - >>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()])) - False - """ - if isinstance(arr_or_obj, ABCDateOffset): - return True - elif is_list_like(arr_or_obj) and len(arr_or_obj) and is_object_dtype(arr_or_obj): - return all(isinstance(x, ABCDateOffset) for x in arr_or_obj) - return False - - def is_datetime64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the datetime64 dtype. 
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index afce374aebe05..57368a799138a 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -2307,6 +2307,32 @@ def test_dti_addsub_offset_arraylike( expected = tm.box_expected(expected, xbox) tm.assert_equal(res, expected) + @pytest.mark.parametrize("other_box", [pd.Index, np.array]) + def test_dti_addsub_object_arraylike( + self, tz_naive_fixture, box_with_array, other_box + ): + tz = tz_naive_fixture + + dti = pd.date_range("2017-01-01", periods=2, tz=tz) + dtarr = tm.box_expected(dti, box_with_array) + other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)]) + xbox = get_upcast_box(box_with_array, other) + + expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) + expected = tm.box_expected(expected, xbox) + + warn = None if box_with_array is pd.DataFrame else PerformanceWarning + with tm.assert_produces_warning(warn): + result = dtarr + other + tm.assert_equal(result, expected) + + expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture) + expected = tm.box_expected(expected, xbox) + + with tm.assert_produces_warning(warn): + result = dtarr - other + tm.assert_equal(result, expected) + @pytest.mark.parametrize("years", [-1, 0, 1]) @pytest.mark.parametrize("months", [-2, 0, 2]) diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index f0edcd11567d2..8bc952e85bb5d 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -1036,6 +1036,26 @@ def test_parr_add_sub_index(self): expected = pi - pi tm.assert_index_equal(result, expected) + def test_parr_add_sub_object_array(self): + pi = pd.period_range("2000-12-31", periods=3, freq="D") + parr = pi.array + + other = np.array([pd.Timedelta(days=1), pd.offsets.Day(2), 3]) + + with tm.assert_produces_warning(PerformanceWarning): + 
result = parr + other + + expected = pd.PeriodIndex( + ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D" + ).array + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(PerformanceWarning): + result = parr - other + + expected = pd.PeriodIndex(["2000-12-30"] * 3, freq="D").array + tm.assert_equal(result, expected) + class TestPeriodSeriesArithmetic: def test_ops_series_timedelta(self): diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index cc337f8fdd7ce..d61adf5ef2e7b 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -1469,6 +1469,40 @@ def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array): with tm.assert_produces_warning(PerformanceWarning): anchored - tdi + # ------------------------------------------------------------------ + # Unsorted + + def test_td64arr_add_sub_object_array(self, box_with_array): + tdi = pd.timedelta_range("1 day", periods=3, freq="D") + tdarr = tm.box_expected(tdi, box_with_array) + + other = np.array( + [pd.Timedelta(days=1), pd.offsets.Day(2), pd.Timestamp("2000-01-04")] + ) + + warn = PerformanceWarning if box_with_array is not pd.DataFrame else None + with tm.assert_produces_warning(warn): + result = tdarr + other + + expected = pd.Index( + [pd.Timedelta(days=2), pd.Timedelta(days=4), pd.Timestamp("2000-01-07")] + ) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + with pytest.raises(TypeError): + with tm.assert_produces_warning(warn): + tdarr - other + + with tm.assert_produces_warning(warn): + result = other - tdarr + + expected = pd.Index( + [pd.Timedelta(0), pd.Timedelta(0), pd.Timestamp("2000-01-01")] + ) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + class TestTimedeltaArraylikeMulDivOps: # Tests for timedelta64[ns] diff --git a/pandas/tests/dtypes/test_common.py 
b/pandas/tests/dtypes/test_common.py index 667ee467f2f29..881ceddd52b0b 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -625,18 +625,6 @@ def test_is_complex_dtype(): assert com.is_complex_dtype(np.array([1 + 1j, 5])) -def test_is_offsetlike(): - assert com.is_offsetlike(np.array([pd.DateOffset(month=3), pd.offsets.Nano()])) - assert com.is_offsetlike(pd.offsets.MonthEnd()) - assert com.is_offsetlike(pd.Index([pd.DateOffset(second=1)])) - - assert not com.is_offsetlike(pd.Timedelta(1)) - assert not com.is_offsetlike(np.array([1 + 1j, 5])) - - # mixed case - assert not com.is_offsetlike(np.array([pd.DateOffset(), pd.Timestamp(0)])) - - @pytest.mark.parametrize( "input_param,result", [
Bonus: we get to get rid of `is_offsetlike`.
https://api.github.com/repos/pandas-dev/pandas/pulls/30594
2020-01-01T02:54:24Z
2020-01-01T03:41:35Z
2020-01-01T03:41:34Z
2020-01-01T03:47:01Z
CLN: remove no-longer-reachable addsub_int_array
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ceeaf018eb5f3..610fe4afcc9a0 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1093,39 +1093,6 @@ def _sub_period_array(self, other): new_values[mask] = NaT return new_values - def _addsub_int_array(self, other, op): - """ - Add or subtract array-like of integers equivalent to applying - `_time_shift` pointwise. - - Parameters - ---------- - other : Index, ExtensionArray, np.ndarray - integer-dtype - op : {operator.add, operator.sub} - - Returns - ------- - result : same class as self - """ - # _addsub_int_array is overridden by PeriodArray - assert not is_period_dtype(self) - assert op in [operator.add, operator.sub] - - if self.freq is None: - # GH#19123 - raise NullFrequencyError("Cannot shift with no freq") - - elif isinstance(self.freq, Tick): - # easy case where we can convert to timedelta64 operation - td = Timedelta(self.freq) - return op(self, td * other) - - # We should only get here with DatetimeIndex; dispatch - # to _addsub_offset_array - assert not is_timedelta64_dtype(self) - return op(self, np.array(other) * self.freq) - def _addsub_offset_array(self, other, op): """ Add or subtract array-like of DateOffset objects diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index df057ce5a0104..36bbe6ee02de2 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -634,12 +634,23 @@ def _sub_period(self, other): return new_data - @Appender(dtl.DatetimeLikeArrayMixin._addsub_int_array.__doc__) def _addsub_int_array( - self, - other: Union[ABCPeriodArray, ABCSeries, ABCPeriodIndex, np.ndarray], - op: Callable[[Any], Any], - ) -> ABCPeriodArray: + self, other: np.ndarray, op: Callable[[Any], Any], + ) -> "PeriodArray": + """ + Add or subtract array of integers; equivalent to applying + `_time_shift` pointwise. 
+ + Parameters + ---------- + other : np.ndarray[integer-dtype] + op : {operator.add, operator.sub} + + Returns + ------- + result : PeriodArray + """ + assert op in [operator.add, operator.sub] if op is operator.sub: other = -other
https://api.github.com/repos/pandas-dev/pandas/pulls/30592
2020-01-01T00:56:04Z
2020-01-01T02:25:59Z
2020-01-01T02:25:59Z
2020-01-01T02:28:33Z
CLN: datetimelike EA and Index cleanups
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index df057ce5a0104..c5be958feb831 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -476,11 +476,6 @@ def to_timestamp(self, freq=None, how="start"): # -------------------------------------------------------------------- # Array-like / EA-Interface Methods - def _formatter(self, boxed=False): - if boxed: - return str - return "'{}'".format - @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__) def _validate_fill_value(self, fill_value): if isna(fill_value): @@ -492,6 +487,9 @@ def _validate_fill_value(self, fill_value): raise ValueError(f"'fill_value' should be a Period. Got '{fill_value}'.") return fill_value + def _values_for_argsort(self): + return self._data + # -------------------------------------------------------------------- def _time_shift(self, periods, freq=None): @@ -582,6 +580,11 @@ def asfreq(self, freq=None, how="E"): # ------------------------------------------------------------------ # Rendering Methods + def _formatter(self, boxed=False): + if boxed: + return str + return "'{}'".format + def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): """ actually format my specific types @@ -774,9 +777,6 @@ def _check_timedeltalike_freq_compat(self, other): _raise_on_incompatible(self, other) - def _values_for_argsort(self): - return self._data - PeriodArray._add_comparison_ops() diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index b95dfc9ba7580..df9a0c418f9ae 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -378,6 +378,9 @@ def astype(self, dtype, copy=True): return self return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy) + # ---------------------------------------------------------------- + # Reductions + def sum( self, axis=None, diff --git a/pandas/core/base.py b/pandas/core/base.py index 948b80fef4032..064a51bf0ce74 100644 --- 
a/pandas/core/base.py +++ b/pandas/core/base.py @@ -597,7 +597,7 @@ class IndexOpsMixin: # ndarray compatibility __array_priority__ = 1000 _deprecations: FrozenSet[str] = frozenset( - ["tolist", "item"] # tolist is not deprecated, just suppressed in the __dir__ + ["tolist"] # tolist is not deprecated, just suppressed in the __dir__ ) def transpose(self, *args, **kwargs): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 7ba04fc9d2fea..65bbb3d641fbf 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -122,7 +122,6 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): ) resolution = cache_readonly(DatetimeLikeArrayMixin.resolution.fget) # type: ignore - _maybe_mask_results = ea_passthrough(DatetimeLikeArrayMixin._maybe_mask_results) __iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__) mean = ea_passthrough(DatetimeLikeArrayMixin.mean) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9ff968bc554e4..1e9576f8bbb8e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -35,7 +35,6 @@ DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, DatetimeTimedeltaMixin, - ea_passthrough, ) from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import get_op_result_name @@ -1135,8 +1134,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): is_normalized = cache_readonly(DatetimeArray.is_normalized.fget) # type: ignore _resolution = cache_readonly(DatetimeArray._resolution.fget) # type: ignore - _has_same_tz = ea_passthrough(DatetimeArray._has_same_tz) - def __getitem__(self, key): result = self._data.__getitem__(key) if is_scalar(result): @@ -1202,6 +1199,7 @@ def insert(self, loc, item): self._assert_can_do_op(item) if not self._has_same_tz(item) and not isna(item): raise ValueError("Passed item and index have different timezone") + # check freq can be preserved on edge cases if self.size and 
self.freq is not None: if item is NaT: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 6465a0c1724af..56e5fc21bce97 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -20,7 +20,6 @@ ) from pandas.core.accessor import delegate_names -from pandas.core.algorithms import unique1d from pandas.core.arrays.period import PeriodArray, period_array, validate_dtype_freq from pandas.core.base import _shared_docs import pandas.core.common as com @@ -622,18 +621,6 @@ def _get_unique_index(self, dropna=False): res = res.dropna() return res - @Appender(Index.unique.__doc__) - def unique(self, level=None): - # override the Index.unique method for performance GH#23083 - if level is not None: - # this should never occur, but is retained to make the signature - # match Index.unique - self._validate_index_level(level) - - values = self._ndarray_values - result = unique1d(values) - return self._shallow_copy(result) - def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 480a4ae34bfb7..156e5317c008b 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -30,7 +30,6 @@ DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, DatetimeTimedeltaMixin, - ea_passthrough, ) from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import get_op_result_name @@ -50,9 +49,12 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): "__neg__", "__pos__", "__abs__", + "sum", + "std", + "median", ] _raw_properties = {"components"} - _raw_methods = {"to_pytimedelta"} + _raw_methods = {"to_pytimedelta", "sum", "std", "median"} @delegate_names( @@ -151,9 +153,6 @@ def _join_i8_wrapper(joinf, **kwargs): _datetimelike_ops = TimedeltaArray._datetimelike_ops _datetimelike_methods = TimedeltaArray._datetimelike_methods _other_ops = TimedeltaArray._other_ops - sum = 
ea_passthrough(TimedeltaArray.sum) - std = ea_passthrough(TimedeltaArray.std) - median = ea_passthrough(TimedeltaArray.median) # ------------------------------------------------------------------- # Constructors diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 664f6ea75a3be..c507226054d2c 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -380,7 +380,6 @@ def apply(self, func, **kwargs): return nbs if not isinstance(result, Block): - # Exclude the 0-dim case so we can do reductions result = self.make_block(values=_block_shape(result, ndim=self.ndim)) return result
Working on sharing code between these classes better (xref #20587) and closing in on implementing ExtensionIndex.
https://api.github.com/repos/pandas-dev/pandas/pulls/30591
2020-01-01T00:19:57Z
2020-01-01T01:39:20Z
2020-01-01T01:39:20Z
2020-01-01T02:07:29Z
BUG: validate Index data is 1D + deprecate multi-dim indexing
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 40cbf67f1ea5e..00c1a61521a44 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -614,6 +614,7 @@ Deprecations - The ``pandas.util.testing`` module has been deprecated. Use the public API in ``pandas.testing`` documented at :ref:`api.general.testing` (:issue:`16232`). - ``pandas.SparseArray`` has been deprecated. Use ``pandas.arrays.SparseArray`` (:class:`arrays.SparseArray`) instead. (:issue:`30642`) - The parameter ``is_copy`` of :meth:`DataFrame.take` has been deprecated and will be removed in a future version. (:issue:`27357`) +- Support for multi-dimensional indexing (e.g. ``index[:, None]``) on a :class:`Index` is deprecated and will be removed in a future version, convert to a numpy array before indexing instead (:issue:`30588`) **Selecting Columns from a Grouped DataFrame** @@ -1075,6 +1076,7 @@ Other - Bug in :meth:`DataFrame.to_csv` when supplied a series with a ``dtype="string"`` and a ``na_rep``, the ``na_rep`` was being truncated to 2 characters. (:issue:`29975`) - Bug where :meth:`DataFrame.itertuples` would incorrectly determine whether or not namedtuples could be used for dataframes of 255 columns (:issue:`28282`) - Handle nested NumPy ``object`` arrays in :func:`testing.assert_series_equal` for ExtensionArray implementations (:issue:`30841`) +- Bug in :class:`Index` constructor incorrectly allowing 2-dimensional input arrays (:issue:`13601`, :issue:`27125`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index a67d31344ff55..1d87753be8cc3 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2007,9 +2007,10 @@ def __getitem__(self, key): if com.is_bool_indexer(key): key = check_bool_array_indexer(self, key) - return self._constructor( - values=self._codes[key], dtype=self.dtype, fastpath=True - ) + result = self._codes[key] + if result.ndim > 1: + return result + return self._constructor(result, dtype=self.dtype, fastpath=True) def __setitem__(self, key, value): """ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 06c1338dbf5ab..b381f43d33c53 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -543,8 +543,6 @@ def __getitem__(self, key): if result.ndim > 1: # To support MPL which performs slicing with 2 dim # even though it only has 1 dim by definition - if is_period: - return self._simple_new(result, dtype=self.dtype, freq=freq) return result return self._simple_new(result, dtype=self.dtype, freq=freq) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index e6cfca74048e7..e4efb187ffae5 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -500,8 +500,11 @@ def __getitem__(self, value): # scalar if not isinstance(left, ABCIndexClass): - if isna(left): + if is_scalar(left) and isna(left): return self._fill_value + if np.ndim(left) > 1: + # GH#30588 multi-dimensional indexer disallowed + raise ValueError("multi-dimensional indexing not allowed") return Interval(left, right, self.closed) return self._shallow_copy(left, right) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b02c0a08c93fa..143bb8dbda8b4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -393,6 +393,9 @@ def __new__( if 
kwargs: raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}") + if subarr.ndim > 1: + # GH#13601, GH#20285, GH#27125 + raise ValueError("Index data must be 1-dimensional") return cls._simple_new(subarr, name, **kwargs) elif hasattr(data, "__array__"): @@ -608,7 +611,7 @@ def __array_wrap__(self, result, context=None): Gets called after a ufunc. """ result = lib.item_from_zerodim(result) - if is_bool_dtype(result) or lib.is_scalar(result): + if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result attrs = self._get_attributes_dict() @@ -687,11 +690,10 @@ def astype(self, dtype, copy=True): return Index(np.asarray(self), dtype=dtype, copy=copy) try: - return Index( - self.values.astype(dtype, copy=copy), name=self.name, dtype=dtype - ) + casted = self.values.astype(dtype, copy=copy) except (TypeError, ValueError): raise TypeError(f"Cannot cast {type(self).__name__} to dtype {dtype}") + return Index(casted, name=self.name, dtype=dtype) _index_shared_docs[ "take" @@ -3902,6 +3904,9 @@ def __getitem__(self, key): key = com.values_from_object(key) result = getitem(key) if not is_scalar(result): + if np.ndim(result) > 1: + deprecate_ndim_indexing(result) + return result return promote(result) else: return result @@ -5533,3 +5538,17 @@ def _try_convert_to_int_array( pass raise ValueError + + +def deprecate_ndim_indexing(result): + if np.ndim(result) > 1: + # GH#27125 indexer like idx[:, None] expands dim, but we + # cannot do that and keep an index, so return ndarray + # Deprecation GH#30588 + warnings.warn( + "Support for multi-dimensional indexing (e.g. `index[:, None]`) " + "on an Index is deprecated and will be removed in a future " + "version. 
Convert to a numpy array before indexing instead.", + DeprecationWarning, + stacklevel=3, + ) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index bd089f574a313..58fcce7e59be7 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -12,7 +12,7 @@ from pandas.core.dtypes.generic import ABCSeries from pandas.core.arrays import ExtensionArray -from pandas.core.indexes.base import Index +from pandas.core.indexes.base import Index, deprecate_ndim_indexing from pandas.core.ops import get_op_result_name @@ -178,6 +178,7 @@ def __getitem__(self, key): return type(self)(result, name=self.name) # Includes cases where we get a 2D ndarray back for MPL compat + deprecate_ndim_indexing(result) return result def __iter__(self): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 39cbe5f151262..b9b44284edaa9 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -73,6 +73,10 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None): else: subarr = data + if subarr.ndim > 1: + # GH#13601, GH#20285, GH#27125 + raise ValueError("Index data must be 1-dimensional") + name = maybe_extract_name(name, data, cls) return cls._simple_new(subarr, name=name) diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index d09dc586fe056..e027641288bb9 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -975,3 +975,9 @@ def test_engine_type(self, dtype, engine_type): ci.values._codes = ci.values._codes.astype("int64") assert np.issubdtype(ci.codes.dtype, dtype) assert isinstance(ci._engine, engine_type) + + def test_getitem_2d_deprecated(self): + # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable + idx = self.create_index() + with pytest.raises(ValueError, match="cannot mask with array containing NA"): + idx[:, 
None] diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index ceb3ac8b61c0b..a16017b0e12c0 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -875,3 +875,11 @@ def test_engine_reference_cycle(self): nrefs_pre = len(gc.get_referrers(index)) index._engine assert len(gc.get_referrers(index)) == nrefs_pre + + def test_getitem_2d_deprecated(self): + # GH#30588 + idx = self.create_index() + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): + res = idx[:, None] + + assert isinstance(res, np.ndarray), type(res) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 91007a1ba529e..4c600e510790a 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -86,7 +86,9 @@ def test_dti_business_getitem(self): def test_dti_business_getitem_matplotlib_hackaround(self): rng = pd.bdate_range(START, END) - values = rng[:, None] + with tm.assert_produces_warning(DeprecationWarning): + # GH#30588 multi-dimensional indexing deprecated + values = rng[:, None] expected = rng.values[:, None] tm.assert_numpy_array_equal(values, expected) @@ -110,7 +112,9 @@ def test_dti_custom_getitem(self): def test_dti_custom_getitem_matplotlib_hackaround(self): rng = pd.bdate_range(START, END, freq="C") - values = rng[:, None] + with tm.assert_produces_warning(DeprecationWarning): + # GH#30588 multi-dimensional indexing deprecated + values = rng[:, None] expected = rng.values[:, None] tm.assert_numpy_array_equal(values, expected) diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index 91f8dddea71d7..d8c2ba8413cfb 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -79,3 +79,10 @@ def test_where(self, closed, klass): expected = IntervalIndex([np.nan] + idx[1:].tolist()) result = 
idx.where(klass(cond)) tm.assert_index_equal(result, expected) + + def test_getitem_2d_deprecated(self): + # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable + idx = self.create_index() + with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"): + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): + idx[:, None] diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 2c7fc8b320325..8dd430b55117e 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -71,7 +71,9 @@ def test_can_hold_identifiers(self): @pytest.mark.parametrize("index", ["datetime"], indirect=True) def test_new_axis(self, index): - new_index = index[None, :] + with tm.assert_produces_warning(DeprecationWarning): + # GH#30588 multi-dimensional indexing deprecated + new_index = index[None, :] assert new_index.ndim == 2 assert isinstance(new_index, np.ndarray) @@ -2784,9 +2786,35 @@ def test_shape_of_invalid_index(): # about this). 
However, as long as this is not solved in general,this test ensures # that the returned shape is consistent with this underlying array for # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775) - a = np.arange(8).reshape(2, 2, 2) - idx = pd.Index(a) - assert idx.shape == a.shape - idx = pd.Index([0, 1, 2, 3]) - assert idx[:, None].shape == (4, 1) + with tm.assert_produces_warning(DeprecationWarning): + # GH#30588 multi-dimensional indexing deprecated + assert idx[:, None].shape == (4, 1) + + +def test_validate_1d_input(): + # GH#27125 check that we do not have >1-dimensional input + msg = "Index data must be 1-dimensional" + + arr = np.arange(8).reshape(2, 2, 2) + with pytest.raises(ValueError, match=msg): + pd.Index(arr) + + with pytest.raises(ValueError, match=msg): + pd.Float64Index(arr.astype(np.float64)) + + with pytest.raises(ValueError, match=msg): + pd.Int64Index(arr.astype(np.int64)) + + with pytest.raises(ValueError, match=msg): + pd.UInt64Index(arr.astype(np.uint64)) + + df = pd.DataFrame(arr.reshape(4, 2)) + with pytest.raises(ValueError, match=msg): + pd.Index(df) + + # GH#13601 trying to assign a multi-dimensional array to an index is not + # allowed + ser = pd.Series(0, range(4)) + with pytest.raises(ValueError, match=msg): + ser.index = np.array([[2, 3]] * 4) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index ea4d8edd2f413..448a06070c45c 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -83,12 +83,9 @@ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id): msg = ( r"Buffer has wrong number of dimensions \(expected 1," r" got 3\)|" - "The truth value of an array with more than one element is " - "ambiguous|" "Cannot index with multidimensional key|" r"Wrong number of dimensions. 
values.ndim != ndim \[3 != 1\]|" - "No matching signature found|" # TypeError - "unhashable type: 'numpy.ndarray'" # TypeError + "Index data must be 1-dimensional" ) if ( @@ -104,21 +101,12 @@ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id): "categorical", ] ): - idxr[nd3] - else: - if ( - isinstance(obj, DataFrame) - and idxr_id == "getitem" - and index.inferred_type == "boolean" - ): - error = TypeError - elif idxr_id == "getitem" and index.inferred_type == "interval": - error = TypeError - else: - error = ValueError - - with pytest.raises(error, match=msg): + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): idxr[nd3] + else: + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(DeprecationWarning): + idxr[nd3] @pytest.mark.parametrize( "index", tm.all_index_generator(5), ids=lambda x: type(x).__name__ @@ -146,16 +134,14 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id): nd3 = np.random.randint(5, size=(2, 2, 2)) msg = ( - r"Buffer has wrong number of dimensions \(expected 1, " - r"got 3\)|" - "The truth value of an array with more than one element is " - "ambiguous|" - "Only 1-dimensional input arrays are supported|" + r"Buffer has wrong number of dimensions \(expected 1," + r" got 3\)|" "'pandas._libs.interval.IntervalTree' object has no attribute " "'set_value'|" # AttributeError "unhashable type: 'numpy.ndarray'|" # TypeError "No matching signature found|" # TypeError - r"^\[\[\[" # pandas.core.indexing.IndexingError + r"^\[\[\[|" # pandas.core.indexing.IndexingError + "Index data must be 1-dimensional" ) if (idxr_id == "iloc") or ( @@ -176,10 +162,8 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id): ): idxr[nd3] = 0 else: - with pytest.raises( - (ValueError, AttributeError, TypeError, pd.core.indexing.IndexingError), - match=msg, - ): + err = (ValueError, AttributeError) + with pytest.raises(err, match=msg): idxr[nd3] = 0 def test_inf_upcast(self): diff --git 
a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 39d6bc69e7c00..0038df78dd866 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -136,7 +136,7 @@ def test_write_with_index(self): # column multi-index df.index = [0, 1, 2] - df.columns = (pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]),) + df.columns = pd.MultiIndex.from_tuples([("a", 1)]) self.check_error_on_write(df, ValueError) def test_path_pathlib(self): diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 8fdf86ddabafe..9cd3ccbf9214e 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -66,11 +66,10 @@ def test_registering_no_warning(self): # Set to the "warn" state, in case this isn't the first test run register_matplotlib_converters() - with tm.assert_produces_warning(None) as w: + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): + # GH#30588 DeprecationWarning from 2D indexing ax.plot(s.index, s.values) - assert len(w) == 0 - def test_pandas_plots_register(self): pytest.importorskip("matplotlib.pyplot") s = Series(range(12), index=date_range("2017", periods=12)) @@ -101,19 +100,16 @@ def test_option_no_warning(self): # Test without registering first, no warning with ctx: - with tm.assert_produces_warning(None) as w: + # GH#30588 DeprecationWarning from 2D indexing on Index + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): ax.plot(s.index, s.values) - assert len(w) == 0 - # Now test with registering register_matplotlib_converters() with ctx: - with tm.assert_produces_warning(None) as w: + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): ax.plot(s.index, s.values) - assert len(w) == 0 - def test_registry_resets(self): units = pytest.importorskip("matplotlib.units") dates = pytest.importorskip("matplotlib.dates") diff --git a/pandas/tests/series/test_timeseries.py 
b/pandas/tests/series/test_timeseries.py index a135c0cf7cd7e..a2d14f27d7b7a 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -137,7 +137,9 @@ def test_first_last_valid(self, datetime_series): assert ts.last_valid_index().freq == ts.index.freq def test_mpl_compat_hack(self, datetime_series): - result = datetime_series[:, np.newaxis] + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): + # GH#30588 multi-dimensional indexing deprecated + result = datetime_series[:, np.newaxis] expected = datetime_series.values[:, np.newaxis] tm.assert_almost_equal(result, expected)
- [x] closes #13601 - [x] closes #27125 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This changes the behavior of `idx[:, None]` to return an ndarray instead of an invalid Index, needed to keep matplotlib tests working. See also #20285 which this does not entirely close. That becomes pretty easy to address though once a decision is made on whether to treat `[[0, 1], [2, 3]]` like `[(0, 1), (2, 3)]` (the latter becomes a MultiIndex, the former currently becomes an invalid Index)
https://api.github.com/repos/pandas-dev/pandas/pulls/30588
2019-12-31T20:31:38Z
2020-01-09T21:17:06Z
2020-01-09T21:17:06Z
2023-04-05T10:20:20Z
REF: share code between DatetimeIndex and TimedeltaIndex
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 7ba04fc9d2fea..88e70ac693a91 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -33,6 +33,7 @@ ) import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs +from pandas.core.indexes.numeric import Int64Index from pandas.core.tools.timedeltas import to_timedelta from pandas.tseries.frequencies import DateOffset, to_offset @@ -71,36 +72,6 @@ def method(self, other): return method -class DatetimeTimedeltaMixin: - """ - Mixin class for methods shared by DatetimeIndex and TimedeltaIndex, - but not PeriodIndex - """ - - def _set_freq(self, freq): - """ - Set the _freq attribute on our underlying DatetimeArray. - - Parameters - ---------- - freq : DateOffset, None, or "infer" - """ - # GH#29843 - if freq is None: - # Always valid - pass - elif len(self) == 0 and isinstance(freq, DateOffset): - # Always valid. In the TimedeltaIndex case, we assume this - # is a Tick offset. - pass - else: - # As an internal method, we can ensure this assertion always holds - assert freq == "infer" - freq = to_offset(self.inferred_freq) - - self._data._freq = freq - - class DatetimeIndexOpsMixin(ExtensionOpsMixin): """ Common ops mixin to support a unified interface datetimelike Index. 
@@ -126,6 +97,10 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): __iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__) mean = ea_passthrough(DatetimeLikeArrayMixin.mean) + @property + def is_all_dates(self) -> bool: + return True + @property def freq(self): """ @@ -606,66 +581,6 @@ def isin(self, values, level=None): return algorithms.isin(self.asi8, values.asi8) - def intersection(self, other, sort=False): - self._validate_sort_keyword(sort) - self._assert_can_do_setop(other) - - if self.equals(other): - return self._get_reconciled_name_object(other) - - if len(self) == 0: - return self.copy() - if len(other) == 0: - return other.copy() - - if not isinstance(other, type(self)): - result = Index.intersection(self, other, sort=sort) - if isinstance(result, type(self)): - if result.freq is None: - result._set_freq("infer") - return result - - elif ( - other.freq is None - or self.freq is None - or other.freq != self.freq - or not other.freq.is_anchored() - or (not self.is_monotonic or not other.is_monotonic) - ): - result = Index.intersection(self, other, sort=sort) - - # Invalidate the freq of `result`, which may not be correct at - # this point, depending on the values. 
- - result._set_freq(None) - if hasattr(self, "tz"): - result = self._shallow_copy( - result._values, name=result.name, tz=result.tz, freq=None - ) - else: - result = self._shallow_copy(result._values, name=result.name, freq=None) - if result.freq is None: - result._set_freq("infer") - return result - - # to make our life easier, "sort" the two ranges - if self[0] <= other[0]: - left, right = self, other - else: - left, right = other, self - - # after sorting, the intersection always starts with the right index - # and ends with the index of which the last elements is smallest - end = min(left[-1], right[-1]) - start = right[0] - - if end < start: - return type(self)(data=[]) - else: - lslice = slice(*left.slice_locs(start, end)) - left_chunk = left.values[lslice] - return self._shallow_copy(left_chunk) - @Appender(_index_shared_docs["repeat"] % _index_doc_kwargs) def repeat(self, repeats, axis=None): nv.validate_repeat(tuple(), dict(axis=axis)) @@ -778,6 +693,168 @@ def shift(self, periods=1, freq=None): return type(self)(result, name=self.name) +class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index): + """ + Mixin class for methods shared by DatetimeIndex and TimedeltaIndex, + but not PeriodIndex + """ + + # Compat for frequency inference, see GH#23789 + _is_monotonic_increasing = Index.is_monotonic_increasing + _is_monotonic_decreasing = Index.is_monotonic_decreasing + _is_unique = Index.is_unique + + def _set_freq(self, freq): + """ + Set the _freq attribute on our underlying DatetimeArray. + + Parameters + ---------- + freq : DateOffset, None, or "infer" + """ + # GH#29843 + if freq is None: + # Always valid + pass + elif len(self) == 0 and isinstance(freq, DateOffset): + # Always valid. In the TimedeltaIndex case, we assume this + # is a Tick offset. 
+ pass + else: + # As an internal method, we can ensure this assertion always holds + assert freq == "infer" + freq = to_offset(self.inferred_freq) + + self._data._freq = freq + + # -------------------------------------------------------------------- + # Set Operation Methods + + @Appender(Index.difference.__doc__) + def difference(self, other, sort=None): + new_idx = super().difference(other, sort=sort) + new_idx._set_freq(None) + return new_idx + + def intersection(self, other, sort=False): + """ + Specialized intersection for DatetimeIndex/TimedeltaIndex. + + May be much faster than Index.intersection + + Parameters + ---------- + other : Same type as self or array-like + sort : False or None, default False + Sort the resulting index if possible. + + .. versionadded:: 0.24.0 + + .. versionchanged:: 0.24.1 + + Changed the default to ``False`` to match the behaviour + from before 0.24.0. + + .. versionchanged:: 0.25.0 + + The `sort` keyword is added + + Returns + ------- + y : Index or same type as self + """ + self._validate_sort_keyword(sort) + self._assert_can_do_setop(other) + + if self.equals(other): + return self._get_reconciled_name_object(other) + + if len(self) == 0: + return self.copy() + if len(other) == 0: + return other.copy() + + if not isinstance(other, type(self)): + result = Index.intersection(self, other, sort=sort) + if isinstance(result, type(self)): + if result.freq is None: + result._set_freq("infer") + return result + + elif ( + other.freq is None + or self.freq is None + or other.freq != self.freq + or not other.freq.is_anchored() + or (not self.is_monotonic or not other.is_monotonic) + ): + result = Index.intersection(self, other, sort=sort) + + # Invalidate the freq of `result`, which may not be correct at + # this point, depending on the values. 
+ + result._set_freq(None) + if hasattr(self, "tz"): + result = self._shallow_copy( + result._values, name=result.name, tz=result.tz, freq=None + ) + else: + result = self._shallow_copy(result._values, name=result.name, freq=None) + if result.freq is None: + result._set_freq("infer") + return result + + # to make our life easier, "sort" the two ranges + if self[0] <= other[0]: + left, right = self, other + else: + left, right = other, self + + # after sorting, the intersection always starts with the right index + # and ends with the index of which the last elements is smallest + end = min(left[-1], right[-1]) + start = right[0] + + if end < start: + return type(self)(data=[]) + else: + lslice = slice(*left.slice_locs(start, end)) + left_chunk = left.values[lslice] + return self._shallow_copy(left_chunk) + + def _can_fast_union(self, other) -> bool: + if not isinstance(other, type(self)): + return False + + freq = self.freq + + if freq is None or freq != other.freq: + return False + + if not self.is_monotonic or not other.is_monotonic: + return False + + if len(self) == 0 or len(other) == 0: + return True + + # to make our life easier, "sort" the two ranges + if self[0] <= other[0]: + left, right = self, other + else: + left, right = other, self + + right_start = right[0] + left_end = left[-1] + + # Only need to "adjoin", not overlap + try: + return (right_start == left_end + freq) or right_start in left + except ValueError: + # if we are comparing a freq that does not propagate timezones + # this will raise + return False + + def wrap_arithmetic_op(self, other, result): if result is NotImplemented: return NotImplemented diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9ff968bc554e4..e3c5d4ca34252 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -37,7 +37,6 @@ DatetimeTimedeltaMixin, ea_passthrough, ) -from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import 
get_op_result_name import pandas.core.tools.datetimes as tools @@ -94,9 +93,7 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin): typ="method", overwrite=False, ) -class DatetimeIndex( - DatetimeTimedeltaMixin, DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin -): +class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin): """ Immutable ndarray of datetime64 data, represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and @@ -411,12 +408,6 @@ def _convert_for_op(self, value): return _to_M8(value) raise ValueError("Passed item and index have different timezone") - @Appender(Index.difference.__doc__) - def difference(self, other, sort=None): - new_idx = super().difference(other, sort=sort) - new_idx._set_freq(None) - return new_idx - # -------------------------------------------------------------------- # Rendering Methods @@ -469,7 +460,7 @@ def _union(self, other, sort): if result.freq is None and ( this.freq is not None or other.freq is not None ): - result._data._freq = to_offset(result.inferred_freq) + result._set_freq("infer") return result def union_many(self, others): @@ -502,39 +493,6 @@ def union_many(self, others): this._data._dtype = dtype return this - def _can_fast_union(self, other) -> bool: - if not isinstance(other, DatetimeIndex): - return False - - freq = self.freq - - if freq is None or freq != other.freq: - return False - - if not self.is_monotonic or not other.is_monotonic: - return False - - if len(self) == 0 or len(other) == 0: - return True - - # to make our life easier, "sort" the two ranges - if self[0] <= other[0]: - left, right = self, other - else: - left, right = other, self - - right_start = right[0] - left_end = left[-1] - - # Only need to "adjoin", not overlap - try: - return (right_start == left_end + freq) or right_start in left - except (ValueError): - - # if we are comparing a freq that does not propagate timezones - # this will raise - return False - 
def _fast_union(self, other, sort=None): if len(other) == 0: return self.view(type(self)) @@ -574,30 +532,6 @@ def _fast_union(self, other, sort=None): else: return left - def intersection(self, other, sort=False): - """ - Specialized intersection for DatetimeIndex objects. - May be much faster than Index.intersection - - Parameters - ---------- - other : DatetimeIndex or array-like - sort : False or None, default False - Sort the resulting index if possible. - - .. versionadded:: 0.24.0 - - .. versionchanged:: 0.24.1 - - Changed the default to ``False`` to match the behaviour - from before 0.24.0. - - Returns - ------- - Index or DatetimeIndex or TimedeltaIndex - """ - return super().intersection(other, sort=sort) - def _wrap_setop_result(self, other, result): name = get_op_result_name(self, other) return self._shallow_copy(result, name=name, freq=None, tz=self.tz) @@ -1126,11 +1060,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): # -------------------------------------------------------------------- # Wrapping DatetimeArray - # Compat for frequency inference, see GH#23789 - _is_monotonic_increasing = Index.is_monotonic_increasing - _is_monotonic_decreasing = Index.is_monotonic_decreasing - _is_unique = Index.is_unique - _timezone = cache_readonly(DatetimeArray._timezone.fget) # type: ignore is_normalized = cache_readonly(DatetimeArray.is_normalized.fget) # type: ignore _resolution = cache_readonly(DatetimeArray._resolution.fget) # type: ignore @@ -1173,10 +1102,6 @@ def inferred_type(self) -> str: # sure we can't have ambiguous indexing return "datetime64" - @property - def is_all_dates(self) -> bool: - return True - def insert(self, loc, item): """ Make new Index inserting new item at location diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 6465a0c1724af..fc171a89d7efe 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -34,7 +34,8 @@ DatetimeIndexOpsMixin, 
DatetimelikeDelegateMixin, ) -from pandas.core.indexes.datetimes import DatetimeIndex, Index, Int64Index +from pandas.core.indexes.datetimes import DatetimeIndex, Index +from pandas.core.indexes.numeric import Int64Index from pandas.core.missing import isna from pandas.core.ops import get_op_result_name from pandas.core.tools.datetimes import DateParseError, parse_time_string @@ -511,10 +512,6 @@ def searchsorted(self, value, side="left", sorter=None): return self._ndarray_values.searchsorted(value, side=side, sorter=sorter) - @property - def is_all_dates(self) -> bool: - return True - @property def is_full(self) -> bool: """ @@ -808,10 +805,6 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) return self._apply_meta(result), lidx, ridx return self._apply_meta(result) - @Appender(Index.intersection.__doc__) - def intersection(self, other, sort=False): - return Index.intersection(self, other, sort=sort) - def _assert_can_do_setop(self, other): super()._assert_can_do_setop(other) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 480a4ae34bfb7..658111255e6aa 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -32,7 +32,6 @@ DatetimeTimedeltaMixin, ea_passthrough, ) -from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import get_op_result_name from pandas.tseries.frequencies import to_offset @@ -65,11 +64,7 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): overwrite=True, ) class TimedeltaIndex( - DatetimeTimedeltaMixin, - DatetimeIndexOpsMixin, - dtl.TimelikeOps, - Int64Index, - TimedeltaDelegateMixin, + DatetimeTimedeltaMixin, dtl.TimelikeOps, TimedeltaDelegateMixin, ): """ Immutable ndarray of timedelta64 data, represented internally as int64, and @@ -255,11 +250,6 @@ def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): # ------------------------------------------------------------------- # Wrapping 
TimedeltaArray - # Compat for frequency inference, see GH#23789 - _is_monotonic_increasing = Index.is_monotonic_increasing - _is_monotonic_decreasing = Index.is_monotonic_decreasing - _is_unique = Index.is_unique - @property def _box_func(self): return lambda x: Timedelta(x, unit="ns") @@ -324,40 +314,6 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) sort=sort, ) - def intersection(self, other, sort=False): - """ - Specialized intersection for TimedeltaIndex objects. - May be much faster than Index.intersection - - Parameters - ---------- - other : TimedeltaIndex or array-like - sort : False or None, default False - Sort the resulting index if possible. - - .. versionadded:: 0.24.0 - - .. versionchanged:: 0.24.1 - - Changed the default to ``False`` to match the behaviour - from before 0.24.0. - - .. versionchanged:: 0.25.0 - - The `sort` keyword is added - - Returns - ------- - y : Index or TimedeltaIndex - """ - return super().intersection(other, sort=sort) - - @Appender(Index.difference.__doc__) - def difference(self, other, sort=None): - new_idx = super().difference(other, sort=sort) - new_idx._set_freq(None) - return new_idx - def _wrap_joined_index(self, joined, other): name = get_op_result_name(self, other) if ( @@ -370,33 +326,6 @@ def _wrap_joined_index(self, joined, other): else: return self._simple_new(joined, name) - def _can_fast_union(self, other): - if not isinstance(other, TimedeltaIndex): - return False - - freq = self.freq - - if freq is None or freq != other.freq: - return False - - if not self.is_monotonic or not other.is_monotonic: - return False - - if len(self) == 0 or len(other) == 0: - return True - - # to make our life easier, "sort" the two ranges - if self[0] <= other[0]: - left, right = self, other - else: - left, right = other, self - - right_start = right[0] - left_end = left[-1] - - # Only need to "adjoin", not overlap - return (right_start == left_end + freq) or right_start in left - def 
_fast_union(self, other): if len(other) == 0: return self.view(type(self)) @@ -557,10 +486,6 @@ def is_type_compatible(self, typ) -> bool: def inferred_type(self) -> str: return "timedelta64" - @property - def is_all_dates(self) -> bool: - return True - def insert(self, loc, item): """ Make new Index inserting new item at location
https://api.github.com/repos/pandas-dev/pandas/pulls/30587
2019-12-31T17:55:12Z
2020-01-01T01:42:25Z
2020-01-01T01:42:25Z
2020-01-01T02:08:09Z
REF: separate casting out of Index.__new__
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a3808f6f4a37e..aa41e2d591029 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -349,41 +349,8 @@ def __new__( # they are actually ints, e.g. '0' and 0.0 # should not be coerced # GH 11836 - if is_integer_dtype(dtype): - inferred = lib.infer_dtype(data, skipna=False) - if inferred == "integer": - data = maybe_cast_to_integer_array(data, dtype, copy=copy) - elif inferred in ["floating", "mixed-integer-float"]: - if isna(data).any(): - raise ValueError("cannot convert float NaN to integer") - - if inferred == "mixed-integer-float": - data = maybe_cast_to_integer_array(data, dtype) - - # If we are actually all equal to integers, - # then coerce to integer. - try: - return cls._try_convert_to_int_index( - data, copy, name, dtype - ) - except ValueError: - pass - - # Return an actual float index. - return Float64Index(data, copy=copy, name=name) - - elif inferred == "string": - pass - else: - data = data.astype(dtype) - elif is_float_dtype(dtype): - inferred = lib.infer_dtype(data, skipna=False) - if inferred == "string": - pass - else: - data = data.astype(dtype) - else: - data = np.array(data, dtype=dtype, copy=copy) + data = _maybe_cast_with_dtype(data, dtype, copy) + dtype = data.dtype # TODO: maybe not for object? # maybe coerce to a sub-class if is_signed_integer_dtype(data.dtype): @@ -5486,3 +5453,101 @@ def maybe_extract_name(name, obj, cls) -> Optional[Hashable]: raise TypeError(f"{cls.__name__}.name must be a hashable type") return name + + +def _maybe_cast_with_dtype(data: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: + """ + If a dtype is passed, cast to the closest matching dtype that is supported + by Index. 
+ + Parameters + ---------- + data : np.ndarray + dtype : np.dtype + copy : bool + + Returns + ------- + np.ndarray + """ + # we need to avoid having numpy coerce + # things that look like ints/floats to ints unless + # they are actually ints, e.g. '0' and 0.0 + # should not be coerced + # GH 11836 + if is_integer_dtype(dtype): + inferred = lib.infer_dtype(data, skipna=False) + if inferred == "integer": + data = maybe_cast_to_integer_array(data, dtype, copy=copy) + elif inferred in ["floating", "mixed-integer-float"]: + if isna(data).any(): + raise ValueError("cannot convert float NaN to integer") + + if inferred == "mixed-integer-float": + data = maybe_cast_to_integer_array(data, dtype) + + # If we are actually all equal to integers, + # then coerce to integer. + try: + data = _try_convert_to_int_array(data, copy, dtype) + except ValueError: + data = np.array(data, dtype=np.float64, copy=copy) + + elif inferred == "string": + pass + else: + data = data.astype(dtype) + elif is_float_dtype(dtype): + inferred = lib.infer_dtype(data, skipna=False) + if inferred == "string": + pass + else: + data = data.astype(dtype) + else: + data = np.array(data, dtype=dtype, copy=copy) + + return data + + +def _try_convert_to_int_array( + data: np.ndarray, copy: bool, dtype: np.dtype +) -> np.ndarray: + """ + Attempt to convert an array of data into an integer array. + + Parameters + ---------- + data : The data to convert. + copy : bool + Whether to copy the data or not. + dtype : np.dtype + + Returns + ------- + int_array : data converted to either an ndarray[int64] or ndarray[uint64] + + Raises + ------ + ValueError if the conversion was not successful. 
+ """ + + if not is_unsigned_integer_dtype(dtype): + # skip int64 conversion attempt if uint-like dtype is passed, as + # this could return Int64Index when UInt64Index is what's desired + try: + res = data.astype("i8", copy=False) + if (res == data).all(): + return res # TODO: might still need to copy + except (OverflowError, TypeError, ValueError): + pass + + # Conversion to int64 failed (possibly due to overflow) or was skipped, + # so let's try now with uint64. + try: + res = data.astype("u8", copy=False) + if (res == data).all(): + return res # TODO: might still need to copy + except (OverflowError, TypeError, ValueError): + pass + + raise ValueError
first of two PRs to separate array casting/inference out of `Index.__new__`. Once both are in place, we'll be able to do all inference/casting up-front and simplify the constructor quite a bit. We'll also be able to look into sharing code between Index/Series/array, and address a handful of outstanding issues with the Index constructor.
https://api.github.com/repos/pandas-dev/pandas/pulls/30586
2019-12-31T17:29:20Z
2020-01-01T02:18:25Z
2020-01-01T02:18:25Z
2020-01-01T03:09:51Z
BUG: Disable parallel cythonize on Windows (GH 30214)
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml index 03529bd6569c6..187a5db99802f 100644 --- a/ci/azure/windows.yml +++ b/ci/azure/windows.yml @@ -34,7 +34,7 @@ jobs: - bash: | source activate pandas-dev conda list - python setup.py build_ext -q -i + python setup.py build_ext -q -i -j 4 python -m pip install --no-build-isolation -e . displayName: 'Build' diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index e8663853b7684..acf798ccfce99 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -980,7 +980,8 @@ Other - Fixed :class:`IntegerArray` returning ``inf`` rather than ``NaN`` for operations dividing by 0 (:issue:`27398`) - Fixed ``pow`` operations for :class:`IntegerArray` when the other value is ``0`` or ``1`` (:issue:`29997`) - Bug in :meth:`Series.count` raises if use_inf_as_na is enabled (:issue:`29478`) -- Bug in :class:`Index` where a non-hashable name could be set without raising ``TypeError`` (:issue:29069`) +- Bug in :class:`Index` where a non-hashable name could be set without raising ``TypeError`` (:issue:`29069`) + .. _whatsnew_1000.contributors: diff --git a/setup.py b/setup.py index af70ee3b30095..489a9602511e8 100755 --- a/setup.py +++ b/setup.py @@ -526,6 +526,11 @@ def maybe_cythonize(extensions, *args, **kwargs): elif parsed.j: nthreads = parsed.j + # GH#30356 Cythonize doesn't support parallel on Windows + if is_platform_windows() and nthreads > 0: + print("Parallel build for cythonize ignored on Windows") + nthreads = 0 + kwargs["nthreads"] = nthreads build_ext.render_templates(_pxifiles) return cythonize(extensions, *args, **kwargs)
- [x ] closes #30356 - [ ] tests added / passed - N/A - [ ] passes `black pandas` - Didn't run - changes too many files - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Added test in `setup.py` to check if nthreads is positive and on Windows.
https://api.github.com/repos/pandas-dev/pandas/pulls/30585
2019-12-31T17:09:11Z
2020-01-01T01:34:36Z
2020-01-01T01:34:36Z
2020-01-03T21:41:05Z
ENH: Add dropna in groupby to allow NaN in keys
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index c5f58425139ee..ddba3dc452e28 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -199,6 +199,33 @@ For example, the groups created by ``groupby()`` below are in the order they app df3.groupby(['X']).get_group('B') +.. _groupby.dropna: + +.. versionadded:: 1.1.0 + +GroupBy dropna +^^^^^^^^^^^^^^ + +By default ``NA`` values are excluded from group keys during the ``groupby`` operation. However, +in case you want to include ``NA`` values in group keys, you could pass ``dropna=False`` to achieve it. + +.. ipython:: python + + df_list = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] + df_dropna = pd.DataFrame(df_list, columns=["a", "b", "c"]) + + df_dropna + +.. ipython:: python + + # Default `dropna` is set to True, which will exclude NaNs in keys + df_dropna.groupby(by=["b"], dropna=True).sum() + + # In order to allow NaN in keys, set `dropna` to False + df_dropna.groupby(by=["b"], dropna=False).sum() + +The default setting of ``dropna`` argument is ``True`` which means ``NA`` are not included in group keys. + .. _groupby.attributes: diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 9c424f70b1ee0..55af0b218a2c7 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -36,6 +36,37 @@ For example: ser["2014"] ser.loc["May 2015"] + +.. _whatsnew_110.groupby_key: + +Allow NA in groupby key +^^^^^^^^^^^^^^^^^^^^^^^^ + +With :ref:`groupby <groupby.dropna>` , we've added a ``dropna`` keyword to :meth:`DataFrame.groupby` and :meth:`Series.groupby` in order to +allow ``NA`` values in group keys. Users can define ``dropna`` to ``False`` if they want to include +``NA`` values in groupby keys. The default is set to ``True`` for ``dropna`` to keep backwards +compatibility (:issue:`3729`) + +.. 
ipython:: python + + df_list = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] + df_dropna = pd.DataFrame(df_list, columns=["a", "b", "c"]) + + df_dropna + +.. ipython:: python + + # Default `dropna` is set to True, which will exclude NaNs in keys + df_dropna.groupby(by=["b"], dropna=True).sum() + + # In order to allow NaN in keys, set `dropna` to False + df_dropna.groupby(by=["b"], dropna=False).sum() + +The default setting of ``dropna`` argument is ``True`` which means ``NA`` are not included in group keys. + +.. versionadded:: 1.1.0 + + .. _whatsnew_110.key_sorting: Sorting with keys diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c2115094918e5..aeb0c2d32c31c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -517,7 +517,11 @@ def _factorize_array( ), ) def factorize( - values, sort: bool = False, na_sentinel: int = -1, size_hint: Optional[int] = None + values, + sort: bool = False, + na_sentinel: int = -1, + size_hint: Optional[int] = None, + dropna: bool = True, ) -> Tuple[np.ndarray, Union[np.ndarray, ABCIndex]]: """ Encode the object as an enumerated type or categorical variable. 
@@ -643,6 +647,14 @@ def factorize( uniques, codes, na_sentinel=na_sentinel, assume_unique=True, verify=False ) + code_is_na = codes == na_sentinel + if not dropna and code_is_na.any(): + # na_value is set based on the dtype of uniques, and compat set to False is + # because we do not want na_value to be 0 for integers + na_value = na_value_for_dtype(uniques.dtype, compat=False) + uniques = np.append(uniques, [na_value]) + codes = np.where(code_is_na, len(uniques) - 1, codes) + uniques = _reconstruct_data(uniques, dtype, original) # return original tenor diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4e86b3710a1bd..19caf42823fa3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6139,6 +6139,41 @@ def update( Type Captive 210.0 Wild 185.0 + +We can also choose to include NA in group keys or not by setting +`dropna` parameter, the default setting is `True`: + +>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] +>>> df = pd.DataFrame(l, columns=["a", "b", "c"]) + +>>> df.groupby(by=["b"]).sum() + a c +b +1.0 2 3 +2.0 2 5 + +>>> df.groupby(by=["b"], dropna=False).sum() + a c +b +1.0 2 3 +2.0 2 5 +NaN 1 4 + +>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]] +>>> df = pd.DataFrame(l, columns=["a", "b", "c"]) + +>>> df.groupby(by="a").sum() + b c +a +a 13.0 13.0 +b 12.3 123.0 + +>>> df.groupby(by="a", dropna=False).sum() + b c +a +a 13.0 13.0 +b 12.3 123.0 +NaN 12.3 33.0 """ ) @Appender(_shared_docs["groupby"] % _shared_doc_kwargs) @@ -6152,6 +6187,7 @@ def groupby( group_keys: bool = True, squeeze: bool = False, observed: bool = False, + dropna: bool = True, ) -> "DataFrameGroupBy": from pandas.core.groupby.generic import DataFrameGroupBy @@ -6169,6 +6205,7 @@ def groupby( group_keys=group_keys, squeeze=squeeze, observed=observed, + dropna=dropna, ) _shared_docs[ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b550857252466..792e5a1228fe6 100644 --- a/pandas/core/generic.py +++ 
b/pandas/core/generic.py @@ -7475,6 +7475,12 @@ def clip( If False: show all values for categorical groupers. .. versionadded:: 0.23.0 + dropna : bool, default True + If True, and if group keys contain NA values, NA values together + with row/column will be dropped. + If False, NA values will also be treated as the key in groups + + .. versionadded:: 1.1.0 Returns ------- diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 81c3fd7ad9e89..b92ff1c7c8ca4 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -474,6 +474,7 @@ def __init__( squeeze: bool = False, observed: bool = False, mutated: bool = False, + dropna: bool = True, ): self._selection = selection @@ -496,6 +497,7 @@ def __init__( self.squeeze = squeeze self.observed = observed self.mutated = mutated + self.dropna = dropna if grouper is None: from pandas.core.groupby.grouper import get_grouper @@ -508,6 +510,7 @@ def __init__( sort=sort, observed=observed, mutated=self.mutated, + dropna=self.dropna, ) self.obj = obj @@ -2649,6 +2652,7 @@ def get_groupby( squeeze: bool = False, observed: bool = False, mutated: bool = False, + dropna: bool = True, ) -> GroupBy: klass: Type[GroupBy] @@ -2677,4 +2681,5 @@ def get_groupby( squeeze=squeeze, observed=observed, mutated=mutated, + dropna=dropna, ) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index f84ca6c05f40f..948b4ba27f705 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -134,7 +134,9 @@ def __new__(cls, *args, **kwargs): cls = TimeGrouper return super().__new__(cls) - def __init__(self, key=None, level=None, freq=None, axis=0, sort=False): + def __init__( + self, key=None, level=None, freq=None, axis=0, sort=False, dropna=True + ): self.key = key self.level = level self.freq = freq @@ -146,6 +148,7 @@ def __init__(self, key=None, level=None, freq=None, axis=0, sort=False): self.indexer = None self.binner = None self._grouper = None + 
self.dropna = dropna @property def ax(self): @@ -171,6 +174,7 @@ def _get_grouper(self, obj, validate: bool = True): level=self.level, sort=self.sort, validate=validate, + dropna=self.dropna, ) return self.binner, self.grouper, self.obj @@ -283,6 +287,7 @@ def __init__( sort: bool = True, observed: bool = False, in_axis: bool = False, + dropna: bool = True, ): self.name = name self.level = level @@ -293,6 +298,7 @@ def __init__( self.obj = obj self.observed = observed self.in_axis = in_axis + self.dropna = dropna # right place for this? if isinstance(grouper, (Series, Index)) and name is None: @@ -446,7 +452,9 @@ def _make_codes(self) -> None: codes = self.grouper.codes_info uniques = self.grouper.result_index else: - codes, uniques = algorithms.factorize(self.grouper, sort=self.sort) + codes, uniques = algorithms.factorize( + self.grouper, sort=self.sort, dropna=self.dropna + ) uniques = Index(uniques, name=self.name) self._codes = codes self._group_index = uniques @@ -465,6 +473,7 @@ def get_grouper( observed: bool = False, mutated: bool = False, validate: bool = True, + dropna: bool = True, ) -> "Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]": """ Create and return a BaseGrouper, which is an internal @@ -655,6 +664,7 @@ def is_in_obj(gpr) -> bool: sort=sort, observed=observed, in_axis=in_axis, + dropna=dropna, ) if not isinstance(gpr, Grouping) else gpr diff --git a/pandas/core/series.py b/pandas/core/series.py index eb409b432f89c..388395902c0f6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1609,6 +1609,34 @@ def _set_name(self, name, inplace=False) -> "Series": Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 + +We can also choose to include `NA` in group keys or not by defining +`dropna` parameter, the default setting is `True`: + +>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) +>>> ser.groupby(level=0).sum() +a 3 +b 3 +dtype: int64 + +>>> ser.groupby(level=0, dropna=False).sum() +a 3 +b 3 +NaN 3 +dtype: 
int64 + +>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] +>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") +>>> ser.groupby(["a", "b", "a", np.nan]).mean() +a 210.0 +b 350.0 +Name: Max Speed, dtype: float64 + +>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() +a 210.0 +b 350.0 +NaN 20.0 +Name: Max Speed, dtype: float64 """ ) @Appender(generic._shared_docs["groupby"] % _shared_doc_kwargs) @@ -1622,6 +1650,7 @@ def groupby( group_keys: bool = True, squeeze: bool = False, observed: bool = False, + dropna: bool = True, ) -> "SeriesGroupBy": from pandas.core.groupby.generic import SeriesGroupBy @@ -1639,6 +1668,7 @@ def groupby( group_keys=group_keys, squeeze=squeeze, observed=observed, + dropna=dropna, ) # ---------------------------------------------------------------------- diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py new file mode 100644 index 0000000000000..1a525d306e9f5 --- /dev/null +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -0,0 +1,244 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas.testing as tm + + +@pytest.mark.parametrize( + "dropna, tuples, outputs", + [ + ( + True, + [["A", "B"], ["B", "A"]], + {"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]}, + ), + ( + False, + [["A", "B"], ["A", np.nan], ["B", "A"]], + { + "c": [13.0, 12.3, 123.23], + "d": [13.0, 233.0, 123.0], + "e": [13.0, 12.0, 1.0], + }, + ), + ], +) +def test_groupby_dropna_multi_index_dataframe_nan_in_one_group( + dropna, tuples, outputs, nulls_fixture +): + # GH 3729 this is to test that NA is in one group + df_list = [ + ["A", "B", 12, 12, 12], + ["A", nulls_fixture, 12.3, 233.0, 12], + ["B", "A", 123.23, 123, 1], + ["A", "B", 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"]) + grouped = df.groupby(["a", "b"], dropna=dropna).sum() + + mi = pd.MultiIndex.from_tuples(tuples, names=list("ab")) + + # Since right now, by 
default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. + if not dropna: + mi = mi.set_levels(["A", "B", np.nan], level="b") + expected = pd.DataFrame(outputs, index=mi) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, tuples, outputs", + [ + ( + True, + [["A", "B"], ["B", "A"]], + {"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]}, + ), + ( + False, + [["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]], + { + "c": [12.0, 13.3, 123.23, 1.0], + "d": [12.0, 234.0, 123.0, 1.0], + "e": [12.0, 13.0, 1.0, 1.0], + }, + ), + ], +) +def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups( + dropna, tuples, outputs, nulls_fixture, nulls_fixture2 +): + # GH 3729 this is to test that NA in different groups with different representations + df_list = [ + ["A", "B", 12, 12, 12], + ["A", nulls_fixture, 12.3, 233.0, 12], + ["B", "A", 123.23, 123, 1], + [nulls_fixture2, "B", 1, 1, 1.0], + ["A", nulls_fixture2, 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"]) + grouped = df.groupby(["a", "b"], dropna=dropna).sum() + + mi = pd.MultiIndex.from_tuples(tuples, names=list("ab")) + + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. 
+ if not dropna: + mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]]) + expected = pd.DataFrame(outputs, index=mi) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, idx, outputs", + [ + (True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}), + ( + False, + ["A", "B", np.nan], + { + "b": [123.23, 13.0, 12.3], + "c": [123.0, 13.0, 233.0], + "d": [1.0, 13.0, 12.0], + }, + ), + ], +) +def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs): + # GH 3729 + df_list = [ + ["B", 12, 12, 12], + [None, 12.3, 233.0, 12], + ["A", 123.23, 123, 1], + ["B", 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"]) + grouped = df.groupby("a", dropna=dropna).sum() + + expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a")) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, idx, expected", + [ + (True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])), + ( + False, + ["a", "a", "b", np.nan], + pd.Series([3, 3, 3], index=["a", "b", np.nan]), + ), + ], +) +def test_groupby_dropna_series_level(dropna, idx, expected): + ser = pd.Series([1, 2, 3, 3], index=idx) + + result = ser.groupby(level=0, dropna=dropna).sum() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dropna, expected", + [ + (True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")), + ( + False, + pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"), + ), + ], +) +def test_groupby_dropna_series_by(dropna, expected): + ser = pd.Series( + [390.0, 350.0, 30.0, 20.0], + index=["Falcon", "Falcon", "Parrot", "Parrot"], + name="Max Speed", + ) + + result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dropna, tuples, outputs", + [ + ( + True, + [["A", "B"], ["B", "A"]], + {"c": [13.0, 123.23], "d": [12.0, 
123.0], "e": [1.0, 1.0]}, + ), + ( + False, + [["A", "B"], ["A", np.nan], ["B", "A"]], + { + "c": [13.0, 12.3, 123.23], + "d": [12.0, 233.0, 123.0], + "e": [1.0, 12.0, 1.0], + }, + ), + ], +) +def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs): + # GH 3729 + df_list = [ + ["A", "B", 12, 12, 12], + ["A", None, 12.3, 233.0, 12], + ["B", "A", 123.23, 123, 1], + ["A", "B", 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"]) + agg_dict = {"c": sum, "d": max, "e": "min"} + grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict) + + mi = pd.MultiIndex.from_tuples(tuples, names=list("ab")) + + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. + if not dropna: + mi = mi.set_levels(["A", "B", np.nan], level="b") + expected = pd.DataFrame(outputs, index=mi) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "datetime1, datetime2", + [ + (pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")), + (pd.Timedelta("-2 days"), pd.Timedelta("-1 days")), + (pd.Period("2020-01-01"), pd.Period("2020-02-01")), + ], +) +@pytest.mark.parametrize( + "dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)], +) +def test_groupby_dropna_datetime_like_data( + dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2 +): + # 3729 + df = pd.DataFrame( + { + "values": [1, 2, 3, 4, 5, 6], + "dt": [ + datetime1, + unique_nulls_fixture, + datetime2, + unique_nulls_fixture2, + datetime1, + datetime1, + ], + } + ) + + if dropna: + indexes = [datetime1, datetime2] + else: + indexes = [datetime1, datetime2, np.nan] + + grouped = df.groupby("dt", dropna=dropna).agg({"values": sum}) + expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt")) + + tm.assert_frame_equal(grouped, expected) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 
5f904241da485..d6228d031bfd5 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -325,6 +325,78 @@ def test_factorize_na_sentinel(self, sort, na_sentinel, data, uniques): else: tm.assert_extension_array_equal(uniques, expected_uniques) + @pytest.mark.parametrize( + "data, dropna, expected_codes, expected_uniques", + [ + ( + ["a", None, "b", "a"], + True, + np.array([0, -1, 1, 0], dtype=np.int64), + np.array(["a", "b"], dtype=object), + ), + ( + ["a", np.nan, "b", "a"], + True, + np.array([0, -1, 1, 0], dtype=np.int64), + np.array(["a", "b"], dtype=object), + ), + ( + ["a", None, "b", "a"], + False, + np.array([0, 2, 1, 0], dtype=np.int64), + np.array(["a", "b", np.nan], dtype=object), + ), + ( + ["a", np.nan, "b", "a"], + False, + np.array([0, 2, 1, 0], dtype=np.int64), + np.array(["a", "b", np.nan], dtype=object), + ), + ], + ) + def test_object_factorize_dropna( + self, data, dropna, expected_codes, expected_uniques + ): + codes, uniques = algos.factorize(data, dropna=dropna) + + tm.assert_numpy_array_equal(uniques, expected_uniques) + tm.assert_numpy_array_equal(codes, expected_codes) + + @pytest.mark.parametrize( + "data, dropna, expected_codes, expected_uniques", + [ + ( + [1, None, 1, 2], + True, + np.array([0, -1, 0, 1], dtype=np.int64), + np.array([1, 2], dtype="O"), + ), + ( + [1, np.nan, 1, 2], + True, + np.array([0, -1, 0, 1], dtype=np.int64), + np.array([1, 2], dtype=np.float64), + ), + ( + [1, None, 1, 2], + False, + np.array([0, 2, 0, 1], dtype=np.int64), + np.array([1, 2, np.nan], dtype="O"), + ), + ( + [1, np.nan, 1, 2], + False, + np.array([0, 2, 0, 1], dtype=np.int64), + np.array([1, 2, np.nan], dtype=np.float64), + ), + ], + ) + def test_int_factorize_dropna(self, data, dropna, expected_codes, expected_uniques): + codes, uniques = algos.factorize(data, dropna=dropna) + + tm.assert_numpy_array_equal(uniques, expected_uniques) + tm.assert_numpy_array_equal(codes, expected_codes) + class TestUnique: def test_ints(self):
- [x] closes #3729 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Note that this PR will *NOT* fix the issue for `pivot_table` for now, the reason is that there is already an argument called `dropna` in `pivot_table` and it has slightly different meaning, currently it means: `Do not include columns whose entries are all NaN`. I would propose a change in the follow-up PR for this since this is an api change: change the name of current `dropna` to `drop_all_na` maybe? and then add `dropna` to it and it is aligned with the `dropna` in groupby. Summary: After this PR, it will optional to inlcude NaN in group keys, e.g. below, and i also add example in docstring as well: ```python a = [['a', 'b', 12, 12, 12], ['a', None, 12.3, 233., 12], ['b', 'a', 123.23, 123, 1], ['a', 'b', 1, 1, 1.]] df = pd.DataFrame(a, columns=['a', 'b', 'c', 'd', 'e']) df.groupby(by=['a', 'b']).sum() ``` will get ![Screen Shot 2020-01-01 at 11 01 16 AM](https://user-images.githubusercontent.com/9269816/71640222-19e68900-2c86-11ea-8749-c223729eec99.png) with `dropna=False`, ```python df.groupby(by=['a', 'b'], dropna=False).sum() ``` ![Screen Shot 2020-01-01 at 11 01 23 AM](https://user-images.githubusercontent.com/9269816/71640225-2ff44980-2c86-11ea-9c66-a18147179c12.png) For Series, it is the same: ```python s = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) s.groupby(level=0).sum() s.groupby(level=0, dropna=False).sum() ``` ![Screen Shot 2020-01-01 at 11 01 32 AM](https://user-images.githubusercontent.com/9269816/71640228-4d291800-2c86-11ea-9d66-4a4be1a7347d.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/30584
2019-12-31T16:07:32Z
2020-05-09T20:10:36Z
2020-05-09T20:10:36Z
2020-05-28T18:48:46Z
TYP: Add types to top-level funcs, step 2
diff --git a/pandas/_typing.py b/pandas/_typing.py index 7b89486751f12..14cf5157cea1d 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -21,9 +21,10 @@ from pandas.core.arrays.base import ExtensionArray # noqa: F401 from pandas.core.dtypes.dtypes import ExtensionDtype # noqa: F401 from pandas.core.indexes.base import Index # noqa: F401 - from pandas.core.series import Series # noqa: F401 from pandas.core.generic import NDFrame # noqa: F401 from pandas import Interval # noqa: F401 + from pandas.core.series import Series # noqa: F401 + from pandas.core.frame import DataFrame # noqa: F401 # array-like @@ -41,7 +42,19 @@ Dtype = Union[str, np.dtype, "ExtensionDtype"] FilePathOrBuffer = Union[str, Path, IO[AnyStr]] + +# FrameOrSeriesUnion means either a DataFrame or a Series. E.g. +# `def func(a: FrameOrSeriesUnion) -> FrameOrSeriesUnion: ...` means that if a Series +# is passed in, either a Series or DataFrame is returned, and if a DataFrame is passed +# in, either a DataFrame or a Series is returned. +FrameOrSeriesUnion = Union["DataFrame", "Series"] + +# FrameOrSeries is stricter and ensures that the same subclass of NDFrame always is +# used. E.g. `def func(a: FrameOrSeries) -> FrameOrSeries: ...` means that if a +# Series is passed into a function, a Series is always returned and if a DataFrame is +# passed in, a DataFrame is always returned. 
FrameOrSeries = TypeVar("FrameOrSeries", bound="NDFrame") + Axis = Union[str, int] Ordered = Optional[bool] JSONSerializable = Union[PythonScalar, List, Dict] diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 42cfd9d54ac19..39e8e9008a844 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -3,7 +3,7 @@ intended for public consumption """ from textwrap import dedent -from typing import Dict, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union from warnings import catch_warnings, simplefilter, warn import numpy as np @@ -50,6 +50,9 @@ from pandas.core.construction import array, extract_array from pandas.core.indexers import validate_indices +if TYPE_CHECKING: + from pandas import Series + _shared_docs: Dict[str, str] = {} @@ -651,7 +654,7 @@ def value_counts( normalize: bool = False, bins=None, dropna: bool = True, -) -> ABCSeries: +) -> "Series": """ Compute a histogram of the counts of non-null values. @@ -793,7 +796,7 @@ def duplicated(values, keep="first") -> np.ndarray: return f(values, keep=keep) -def mode(values, dropna: bool = True) -> ABCSeries: +def mode(values, dropna: bool = True) -> "Series": """ Returns the mode(s) of an array. 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 97b218878f4cc..ba0c0e7d66b1d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5878,7 +5878,7 @@ def groupby( @Substitution("") @Appender(_shared_docs["pivot"]) - def pivot(self, index=None, columns=None, values=None): + def pivot(self, index=None, columns=None, values=None) -> "DataFrame": from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) @@ -6025,7 +6025,7 @@ def pivot_table( dropna=True, margins_name="All", observed=False, - ): + ) -> "DataFrame": from pandas.core.reshape.pivot import pivot_table return pivot_table( diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 8886732fc8d79..2007f6aa32a57 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -2,10 +2,12 @@ concat routines """ -from typing import Hashable, List, Optional +from typing import Hashable, List, Mapping, Optional, Sequence, Union, overload import numpy as np +from pandas._typing import FrameOrSeriesUnion + from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.arrays.categorical import ( factorize_from_iterable, @@ -26,8 +28,27 @@ # Concatenate DataFrame objects +@overload +def concat( + objs: Union[Sequence["DataFrame"], Mapping[Optional[Hashable], "DataFrame"]], + axis=0, + join: str = "outer", + ignore_index: bool = False, + keys=None, + levels=None, + names=None, + verify_integrity: bool = False, + sort: bool = False, + copy: bool = True, +) -> "DataFrame": + ... + + +@overload def concat( - objs, + objs: Union[ + Sequence[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion] + ], axis=0, join: str = "outer", ignore_index: bool = False, @@ -37,7 +58,24 @@ def concat( verify_integrity: bool = False, sort: bool = False, copy: bool = True, -): +) -> FrameOrSeriesUnion: + ... 
+ + +def concat( + objs: Union[ + Sequence[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion] + ], + axis=0, + join="outer", + ignore_index: bool = False, + keys=None, + levels=None, + names=None, + verify_integrity: bool = False, + sort: bool = False, + copy: bool = True, +) -> FrameOrSeriesUnion: """ Concatenate pandas objects along a particular axis with optional set logic along the other axes. diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 38bda94489d01..722dd8751dfad 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -192,7 +192,9 @@ def lreshape(data: DataFrame, groups, dropna: bool = True, label=None) -> DataFr return data._constructor(mdata, columns=id_cols + pivot_cols) -def wide_to_long(df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+"): +def wide_to_long( + df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+" +) -> DataFrame: r""" Wide panel to long format. Less flexible but more user-friendly than melt. diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index c544c132d6921..b443ba142369c 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Callable, Dict, Tuple, Union +from typing import TYPE_CHECKING, Callable, Dict, List, Tuple, Union import numpy as np @@ -40,7 +40,7 @@ def pivot_table( columns = _convert_by(columns) if isinstance(aggfunc, list): - pieces = [] + pieces: List[DataFrame] = [] keys = [] for func in aggfunc: table = pivot_table( @@ -459,7 +459,7 @@ def crosstab( margins_name: str = "All", dropna: bool = True, normalize=False, -): +) -> "DataFrame": """ Compute a simple cross tabulation of two (or more) factors. 
By default computes a frequency table of the factors unless an array of values and an diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 004bd0199eb58..da92e1154556a 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1,5 +1,6 @@ from functools import partial import itertools +from typing import List import numpy as np @@ -755,7 +756,7 @@ def get_dummies( sparse=False, drop_first=False, dtype=None, -): +) -> "DataFrame": """ Convert categorical variable into dummy/indicator variables. @@ -899,7 +900,7 @@ def check_len(item, name): if data_to_encode.shape == data.shape: # Encoding the entire df, do not prepend any dropped columns - with_dummies = [] + with_dummies: List[DataFrame] = [] elif columns is not None: # Encoding only cols specified in columns. Get all cols not in # columns to prepend to result. diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ea22999470102..3020ac421fc2f 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -281,7 +281,9 @@ def _chk_truncate(self) -> None: series = series.iloc[:max_rows] else: row_num = max_rows // 2 - series = concat((series.iloc[:row_num], series.iloc[-row_num:])) + series = series._ensure_type( + concat((series.iloc[:row_num], series.iloc[-row_num:])) + ) self.tr_row_num = row_num else: self.tr_row_num = None
Next step to #30565. Next up will be the pd.read_* functions.
https://api.github.com/repos/pandas-dev/pandas/pulls/30582
2019-12-31T13:32:48Z
2020-01-05T16:21:07Z
2020-01-05T16:21:07Z
2020-01-05T16:27:52Z
BUG: Series __setitem__ gives wrong result with bool indexer
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 570ce11238327..7bba6feb1c2e8 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -858,6 +858,7 @@ Indexing - Bug when indexing with ``.loc`` where the index was a :class:`CategoricalIndex` with non-string categories didn't work (:issue:`17569`, :issue:`30225`) - :meth:`Index.get_indexer_non_unique` could fail with `TypeError` in some cases, such as when searching for ints in a string index (:issue:`28257`) - Bug in :meth:`Float64Index.get_loc` incorrectly raising ``TypeError`` instead of ``KeyError`` (:issue:`29189`) +- Bug in :meth:`Series.__setitem__` incorrectly assigning values with boolean indexer when the length of new data matches the number of ``True`` values and new data is not a ``Series`` or an ``np.array`` (:issue:`30567`) Missing ^^^^^^^ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 664f6ea75a3be..e47783221ff5d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -944,15 +944,20 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) and np.any(mask[mask]) and getattr(new, "ndim", 1) == 1 ): - - if not ( - mask.shape[-1] == len(new) - or mask[mask].shape[-1] == len(new) - or len(new) == 1 - ): + if mask[mask].shape[-1] == len(new): + # GH 30567 + # If length of ``new`` is less than the length of ``new_values``, + # `np.putmask` would first repeat the ``new`` array and then + # assign the masked values hence produces incorrect result. 
+ # `np.place` on the other hand uses the ``new`` values at it is + # to place in the masked locations of ``new_values`` + np.place(new_values, mask, new) + elif mask.shape[-1] == len(new) or len(new) == 1: + np.putmask(new_values, mask, new) + else: raise ValueError("cannot assign mismatch length to masked array") - - np.putmask(new_values, mask, new) + else: + np.putmask(new_values, mask, new) # maybe upcast me elif mask.any(): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index d75afd1540f22..ea003a72490f9 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1190,3 +1190,13 @@ def test_duplicate_index_mistyped_key_raises_keyerror(): with pytest.raises(KeyError): ser.index._engine.get_loc(None) + + +def test_setitem_with_bool_mask_and_values_matching_n_trues_in_length(): + # GH 30567 + ser = pd.Series([None] * 10) + mask = [False] * 3 + [True] * 5 + [False] * 2 + ser[mask] = range(5) + result = ser + expected = pd.Series([None] * 3 + list(range(5)) + [None] * 2).astype("object") + tm.assert_series_equal(result, expected)
Series.__setitem__ gives wrong result with bool indexer and when length of new data matches the number of Trues and new data is neither a Series nor a numpy array - [x] closes #30567 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30580
2019-12-31T12:55:01Z
2020-01-02T13:59:22Z
2020-01-02T13:59:22Z
2020-01-03T11:41:09Z
STY: Concat string
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index b0eeb7b96e0eb..850217c8a7803 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -286,7 +286,7 @@ def _load_obj(name): continue if "obj" not in locals(): - raise ImportError("No module can be imported " 'from "{}"'.format(name)) + raise ImportError(f'No module can be imported from "{name}"') for part in func_parts: obj = getattr(obj, part)
- [x] ref #30454 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30579
2019-12-31T12:11:41Z
2019-12-31T13:15:01Z
2019-12-31T13:15:01Z
2020-01-01T19:22:34Z
ENH: Add ignore_index to sort_index
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 3810ab37822cc..1f25662bd6c20 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -221,6 +221,7 @@ Other enhancements - DataFrame constructor preserve `ExtensionArray` dtype with `ExtensionArray` (:issue:`11363`) - :meth:`DataFrame.sort_values` and :meth:`Series.sort_values` have gained ``ignore_index`` keyword to be able to reset index after sorting (:issue:`30114`) - :meth:`DataFrame.to_markdown` and :meth:`Series.to_markdown` added (:issue:`11052`) +- :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` have gained ``ignore_index`` keyword to reset index (:issue:`30114`) - :meth:`DataFrame.drop_duplicates` has gained ``ignore_index`` keyword to reset index (:issue:`30114`) - Added new writer for exporting Stata dta files in version 118, ``StataWriter118``. This format supports exporting strings containing Unicode characters (:issue:`23573`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fb1ba4f6f53f8..cdbeeae984456 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1988,7 +1988,7 @@ def to_feather(self, path): @Substitution(klass="DataFrame") @Appender(_shared_docs["to_markdown"]) def to_markdown( - self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs, + self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs ) -> Optional[str]: kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") @@ -4831,6 +4831,7 @@ def sort_index( kind="quicksort", na_position="last", sort_remaining=True, + ignore_index: bool = False, ): # TODO: this can be combined with Series.sort_index impl as @@ -4881,6 +4882,9 @@ def sort_index( # reconstruct axis if needed new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() + if ignore_index: + new_data.axes[1] = ibase.default_index(len(indexer)) + if inplace: return self._update_inplace(new_data) else: diff --git 
a/pandas/core/generic.py b/pandas/core/generic.py index 85bbf9b553b0a..ea8cd3e9f341b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4178,6 +4178,7 @@ def sort_index( kind: str = "quicksort", na_position: str = "last", sort_remaining: bool_t = True, + ignore_index: bool_t = False, ): """ Sort object by labels (along an axis). @@ -4204,6 +4205,10 @@ def sort_index( sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 1.0.0 Returns ------- diff --git a/pandas/core/series.py b/pandas/core/series.py index aa5af9bb893fa..99d19035b1c77 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1433,7 +1433,7 @@ def to_string( @Substitution(klass="Series") @Appender(generic._shared_docs["to_markdown"]) def to_markdown( - self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs, + self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs ) -> Optional[str]: return self.to_frame().to_markdown(buf, mode, **kwargs) @@ -2963,6 +2963,7 @@ def sort_index( kind="quicksort", na_position="last", sort_remaining=True, + ignore_index: bool = False, ): """ Sort Series by index labels. @@ -2991,6 +2992,10 @@ def sort_index( sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + + .. 
versionadded:: 1.0.0 Returns ------- @@ -3118,6 +3123,9 @@ def sort_index( new_values = self._values.take(indexer) result = self._constructor(new_values, index=new_index) + if ignore_index: + result.index = ibase.default_index(len(result)) + if inplace: self._update_inplace(result) else: @@ -4478,9 +4486,7 @@ def to_period(self, freq=None, copy=True): hist = pandas.plotting.hist_series -Series._setup_axes( - ["index"], docs={"index": "The index (axis labels) of the Series."}, -) +Series._setup_axes(["index"], docs={"index": "The index (axis labels) of the Series."}) Series._add_numeric_operations() Series._add_series_or_dataframe_operations() diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 4f311bbaa8eb9..6866aab11d2fa 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -229,3 +229,85 @@ def test_sort_index_intervalindex(self): ) result = result.columns.levels[1].categories tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "original_dict, sorted_dict, ascending, ignore_index, output_index", + [ + ({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]), + ({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]), + ({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]), + ({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]), + ], + ) + def test_sort_index_ignore_index( + self, original_dict, sorted_dict, ascending, ignore_index, output_index + ): + # GH 30114 + original_index = [2, 5, 3] + df = DataFrame(original_dict, index=original_index) + expected_df = DataFrame(sorted_dict, index=output_index) + + sorted_df = df.sort_index(ascending=ascending, ignore_index=ignore_index) + tm.assert_frame_equal(sorted_df, expected_df) + tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index)) + + # Test when inplace is True + copied_df = df.copy() + copied_df.sort_index( + ascending=ascending, 
ignore_index=ignore_index, inplace=True + ) + tm.assert_frame_equal(copied_df, expected_df) + tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index)) + + @pytest.mark.parametrize( + "original_dict, sorted_dict, ascending, ignore_index, output_index", + [ + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [1, 2], "M2": [3, 4]}, + True, + True, + [0, 1], + ), + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [2, 1], "M2": [4, 3]}, + False, + True, + [0, 1], + ), + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [1, 2], "M2": [3, 4]}, + True, + False, + MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")), + ), + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [2, 1], "M2": [4, 3]}, + False, + False, + MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")), + ), + ], + ) + def test_sort_index_ignore_index_multi_index( + self, original_dict, sorted_dict, ascending, ignore_index, output_index + ): + # GH 30114, this is to test ignore_index on MulitIndex of index + mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")) + df = DataFrame(original_dict, index=mi) + expected_df = DataFrame(sorted_dict, index=output_index) + + sorted_df = df.sort_index(ascending=ascending, ignore_index=ignore_index) + tm.assert_frame_equal(sorted_df, expected_df) + tm.assert_frame_equal(df, DataFrame(original_dict, index=mi)) + + # Test when inplace is True + copied_df = df.copy() + copied_df.sort_index( + ascending=ascending, ignore_index=ignore_index, inplace=True + ) + tm.assert_frame_equal(copied_df, expected_df) + tm.assert_frame_equal(df, DataFrame(original_dict, index=mi)) diff --git a/pandas/tests/series/methods/test_sort_index.py b/pandas/tests/series/methods/test_sort_index.py index ab15b8c814029..a9b73c2344681 100644 --- a/pandas/tests/series/methods/test_sort_index.py +++ b/pandas/tests/series/methods/test_sort_index.py @@ -135,3 +135,34 @@ def test_sort_index_intervals(self): [3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1]) ) 
tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "original_list, sorted_list, ascending, ignore_index, output_index", + [ + ([2, 3, 6, 1], [2, 3, 6, 1], True, True, [0, 1, 2, 3]), + ([2, 3, 6, 1], [2, 3, 6, 1], True, False, [0, 1, 2, 3]), + ([2, 3, 6, 1], [1, 6, 3, 2], False, True, [0, 1, 2, 3]), + ([2, 3, 6, 1], [1, 6, 3, 2], False, False, [3, 2, 1, 0]), + ], + ) + def test_sort_index_ignore_index( + self, original_list, sorted_list, ascending, ignore_index, output_index + ): + # GH 30114 + ser = Series(original_list) + expected = Series(sorted_list, index=output_index) + + # Test when inplace is False + sorted_sr = ser.sort_index(ascending=ascending, ignore_index=ignore_index) + tm.assert_series_equal(sorted_sr, expected) + + tm.assert_series_equal(ser, Series(original_list)) + + # Test when inplace is True + copied_sr = ser.copy() + copied_sr.sort_index( + ascending=ascending, ignore_index=ignore_index, inplace=True + ) + tm.assert_series_equal(copied_sr, expected) + + tm.assert_series_equal(ser, Series(original_list))
- [x] closes #30114 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry @jreback I just looked back at #30114 and found out that i overlooked to add `ignore_index` to `sort_index`, and this might be the reason this issue is still left open. I added this to `sort_index` to close this issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/30578
2019-12-31T10:46:18Z
2020-01-03T13:10:49Z
2020-01-03T13:10:49Z
2020-02-13T00:54:28Z
CLN: Clean _test_moments_consistency in common.py
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py index c3648bc619c50..77f59bf919168 100644 --- a/pandas/tests/window/common.py +++ b/pandas/tests/window/common.py @@ -212,40 +212,23 @@ def _create_data(self): super()._create_data() self.data = _consistency_data - def _test_moments_consistency( - self, - min_periods, - count, - mean, - mock_mean, - corr, - var_unbiased=None, - std_unbiased=None, - cov_unbiased=None, - var_biased=None, - std_biased=None, - cov_biased=None, - var_debiasing_factors=None, - ): - def _non_null_values(x): - values = x.values.ravel() - return set(values[notna(values)].tolist()) - + def _test_moments_consistency_mock_mean(self, mean, mock_mean): for (x, is_constant, no_nans) in self.data: - count_x = count(x) mean_x = mean(x) + # check that correlation of a series with itself is either 1 or NaN if mock_mean: # check that mean equals mock_mean expected = mock_mean(x) tm.assert_equal(mean_x, expected.astype("float64")) + def _test_moments_consistency_is_constant(self, min_periods, count, mean, corr): + for (x, is_constant, no_nans) in self.data: + count_x = count(x) + mean_x = mean(x) # check that correlation of a series with itself is either 1 or NaN corr_x_x = corr(x, x) - # assert _non_null_values(corr_x_x).issubset(set([1.])) - # restore once rolling_cov(x, x) is identically equal to var(x) - if is_constant: exp = x.max() if isinstance(x, Series) else x.max().max() @@ -258,6 +241,10 @@ def _non_null_values(x): expected[:] = np.nan tm.assert_equal(corr_x_x, expected) + def _test_moments_consistency_var_debiasing_factors( + self, var_biased=None, var_unbiased=None, var_debiasing_factors=None + ): + for (x, is_constant, no_nans) in self.data: if var_unbiased and var_biased and var_debiasing_factors: # check variance debiasing factors var_unbiased_x = var_unbiased(x) @@ -265,6 +252,24 @@ def _non_null_values(x): var_debiasing_factors_x = var_debiasing_factors(x) tm.assert_equal(var_unbiased_x, var_biased_x * 
var_debiasing_factors_x) + def _test_moments_consistency( + self, + min_periods, + count, + mean, + corr, + var_unbiased=None, + std_unbiased=None, + cov_unbiased=None, + var_biased=None, + std_biased=None, + cov_biased=None, + ): + + for (x, is_constant, no_nans) in self.data: + count_x = count(x) + mean_x = mean(x) + for (std, var, cov) in [ (std_biased, var_biased, cov_biased), (std_unbiased, var_unbiased, cov_unbiased), diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index bf2bd1420b7f4..489c1ff14ecfd 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -379,6 +379,43 @@ def _ewma(s, com, min_periods, adjust, ignore_na): return result com = 3.0 + self._test_moments_consistency_mock_mean( + mean=lambda x: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean(), + mock_mean=lambda x: _ewma( + x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ), + ) + + self._test_moments_consistency_is_constant( + min_periods=min_periods, + count=lambda x: x.expanding().count(), + mean=lambda x: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean(), + corr=lambda x, y: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).corr(y), + ) + + self._test_moments_consistency_var_debiasing_factors( + var_unbiased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=False) + ), + var_biased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=True) + ), + var_debiasing_factors=lambda x: ( + _variance_debiasing_factors( + x, com=com, adjust=adjust, ignore_na=ignore_na + ) + ), + ) # test consistency between different ewm* moments self._test_moments_consistency( min_periods=min_periods, @@ -386,9 +423,6 @@ def _ewma(s, com, min_periods, adjust, 
ignore_na): mean=lambda x: x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na ).mean(), - mock_mean=lambda x: _ewma( - x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ), corr=lambda x, y: x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na ).corr(y), @@ -420,9 +454,4 @@ def _ewma(s, com, min_periods, adjust, ignore_na): com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na ).cov(y, bias=True) ), - var_debiasing_factors=lambda x: ( - _variance_debiasing_factors( - x, com=com, adjust=adjust, ignore_na=ignore_na - ) - ), ) diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py index 3361ecab28669..d311937e234d8 100644 --- a/pandas/tests/window/moments/test_moments_expanding.py +++ b/pandas/tests/window/moments/test_moments_expanding.py @@ -328,12 +328,31 @@ def test_expanding_consistency(self, min_periods): ) # test consistency between different expanding_* moments + self._test_moments_consistency_mock_mean( + mean=lambda x: x.expanding(min_periods=min_periods).mean(), + mock_mean=lambda x: x.expanding(min_periods=min_periods).sum() + / x.expanding().count(), + ) + + self._test_moments_consistency_is_constant( + min_periods=min_periods, + count=lambda x: x.expanding().count(), + mean=lambda x: x.expanding(min_periods=min_periods).mean(), + corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), + ) + + self._test_moments_consistency_var_debiasing_factors( + var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), + var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), + var_debiasing_factors=lambda x: ( + x.expanding().count() + / (x.expanding().count() - 1.0).replace(0.0, np.nan) + ), + ) self._test_moments_consistency( min_periods=min_periods, count=lambda x: x.expanding().count(), mean=lambda x: x.expanding(min_periods=min_periods).mean(), - mock_mean=lambda x: 
x.expanding(min_periods=min_periods).sum() - / x.expanding().count(), corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), std_unbiased=lambda x: x.expanding(min_periods=min_periods).std(), @@ -343,10 +362,6 @@ def test_expanding_consistency(self, min_periods): cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov( y, ddof=0 ), - var_debiasing_factors=lambda x: ( - x.expanding().count() - / (x.expanding().count() - 1.0).replace(0.0, np.nan) - ), ) # test consistency between expanding_xyz() and either (a) diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py index 631b13f874ca7..c110ed172ecb9 100644 --- a/pandas/tests/window/moments/test_moments_rolling.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -945,9 +945,7 @@ def test_rolling_consistency(self, window, min_periods, center): ) # test consistency between different rolling_* moments - self._test_moments_consistency( - min_periods=min_periods, - count=lambda x: (x.rolling(window=window, center=center).count()), + self._test_moments_consistency_mock_mean( mean=lambda x: ( x.rolling( window=window, min_periods=min_periods, center=center @@ -962,6 +960,53 @@ def test_rolling_consistency(self, window, min_periods, center): ).count() ) ), + ) + + self._test_moments_consistency_is_constant( + min_periods=min_periods, + count=lambda x: (x.rolling(window=window, center=center).count()), + mean=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).mean() + ), + corr=lambda x, y: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).corr(y) + ), + ) + + self._test_moments_consistency_var_debiasing_factors( + var_unbiased=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).var() + ), + var_biased=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, 
center=center + ).var(ddof=0) + ), + var_debiasing_factors=lambda x: ( + x.rolling(window=window, center=center) + .count() + .divide( + (x.rolling(window=window, center=center).count() - 1.0).replace( + 0.0, np.nan + ) + ) + ), + ) + + self._test_moments_consistency( + min_periods=min_periods, + count=lambda x: (x.rolling(window=window, center=center).count()), + mean=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).mean() + ), corr=lambda x, y: ( x.rolling( window=window, min_periods=min_periods, center=center @@ -997,15 +1042,6 @@ def test_rolling_consistency(self, window, min_periods, center): window=window, min_periods=min_periods, center=center ).cov(y, ddof=0) ), - var_debiasing_factors=lambda x: ( - x.rolling(window=window, center=center) - .count() - .divide( - (x.rolling(window=window, center=center).count() - 1.0).replace( - 0.0, np.nan - ) - ) - ), ) # test consistency between rolling_xyz() and either (a)
- [ ] xref #30486 #30542 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30577
2019-12-31T08:38:02Z
2020-01-01T02:15:33Z
2020-01-01T02:15:33Z
2020-01-01T02:17:37Z
CLN: assorted cleanups
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 8755abe642068..77c4ed6160dbe 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -794,6 +794,7 @@ Datetimelike - Bug in :class:`DatetimeIndex` addition when adding a non-optimized :class:`DateOffset` incorrectly dropping timezone information (:issue:`30336`) - Bug in :meth:`DataFrame.drop` where attempting to drop non-existent values from a DatetimeIndex would yield a confusing error message (:issue:`30399`) - Bug in :meth:`DataFrame.append` would remove the timezone-awareness of new data (:issue:`30238`) +- Bug in :meth:`Series.cummin` and :meth:`Series.cummax` with timezone-aware dtype incorrectly dropping its timezone (:issue:`15553`) - Bug in :class:`DatetimeArray`, :class:`TimedeltaArray`, and :class:`PeriodArray` where inplace addition and subtraction did not actually operate inplace (:issue:`24115`) Timedelta diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 6844df495547a..0a3009f74492f 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -197,7 +197,7 @@ def __setattr__(self, key, val): else: raise OptionError("You can only set the value of existing options") - def __getattr__(self, key): + def __getattr__(self, key: str): prefix = object.__getattribute__(self, "prefix") if prefix: prefix += "." 
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index fffe09a74571e..7158f251ad805 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -169,13 +169,6 @@ def validate_clip_with_axis(axis, args, kwargs): return axis -COMPRESS_DEFAULTS: "OrderedDict[str, Any]" = OrderedDict() -COMPRESS_DEFAULTS["axis"] = None -COMPRESS_DEFAULTS["out"] = None -validate_compress = CompatValidator( - COMPRESS_DEFAULTS, fname="compress", method="both", max_fname_arg_count=1 -) - CUM_FUNC_DEFAULTS: "OrderedDict[str, Any]" = OrderedDict() CUM_FUNC_DEFAULTS["dtype"] = None CUM_FUNC_DEFAULTS["out"] = None diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ff411c3489e1f..2b108d3997235 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11120,6 +11120,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): def na_accum_func(blk_values): # We will be applying this function to block values if blk_values.dtype.kind in ["m", "M"]: + # GH#30460, GH#29058 # numpy 1.18 started sorting NaTs at the end instead of beginning, # so we need to work around to maintain backwards-consistency. 
orig_dtype = blk_values.dtype diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 6c7c3c1a57d6f..8ff055ff4c1be 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -325,7 +325,7 @@ def f(self): f.__name__ = "plot" return self._groupby.apply(f) - def __getattr__(self, name): + def __getattr__(self, name: str): def attr(*args, **kwargs): def f(self): return getattr(self.plot, name)(*args, **kwargs) @@ -570,7 +570,7 @@ def _set_result_index_ordered(self, result): def _dir_additions(self): return self.obj._dir_additions() | self._apply_whitelist - def __getattr__(self, attr): + def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self.obj: diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 9ae0aa930779b..931653b63af36 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -96,7 +96,7 @@ def __str__(self) -> str: ) return f"{type(self).__name__} [{', '.join(attrs)}]" - def __getattr__(self, attr): + def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self._attributes: @@ -131,7 +131,7 @@ def ax(self): return self.groupby.ax @property - def _typ(self): + def _typ(self) -> str: """ Masquerade for compat as a Series or a DataFrame. """ @@ -140,7 +140,7 @@ def _typ(self): return "dataframe" @property - def _from_selection(self): + def _from_selection(self) -> bool: """ Is the resampling from a DataFrame column or MultiIndex level. """ @@ -316,7 +316,7 @@ def _downsample(self, f): def _upsample(self, f, limit=None, fill_value=None): raise AbstractMethodError(self) - def _gotitem(self, key, ndim, subset=None): + def _gotitem(self, key, ndim: int, subset=None): """ Sub-classes to define. Return a sliced object. 
@@ -1407,7 +1407,7 @@ def _get_resampler(self, obj, kind=None): f"but got an instance of '{type(ax).__name__}'" ) - def _get_grouper(self, obj, validate=True): + def _get_grouper(self, obj, validate: bool = True): # create the resampler and return our binner r = self._get_resampler(obj) r._set_binner() diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index cea70012b47ea..a3d9dbfba9e71 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -472,9 +472,9 @@ def _get_result_dim(self) -> int: else: return self.objs[0].ndim - def _get_new_axes(self): + def _get_new_axes(self) -> List[Index]: ndim = self._get_result_dim() - new_axes = [None] * ndim + new_axes: List = [None] * ndim for i in range(ndim): if i == self.axis: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 5b0fbbb3518d2..176406f953f67 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -184,7 +184,7 @@ def _gotitem(self, key, ndim, subset=None): self._selection = key return self - def __getattr__(self, attr): + def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self.obj: diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 58ab44fba08cf..2f1fa3ce627e6 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -711,7 +711,6 @@ def test_constructor_timestamp_near_dst(self): expected = DatetimeIndex([ts[0].to_pydatetime(), ts[1].to_pydatetime()]) tm.assert_index_equal(result, expected) - # TODO(GH-24559): Remove the xfail for the tz-aware case. 
@pytest.mark.parametrize("klass", [Index, DatetimeIndex]) @pytest.mark.parametrize("box", [np.array, partial(np.array, dtype=object), list]) @pytest.mark.parametrize( diff --git a/pandas/util/_depr_module.py b/pandas/util/_depr_module.py index 5733663dd7ab3..5694ca24aab57 100644 --- a/pandas/util/_depr_module.py +++ b/pandas/util/_depr_module.py @@ -46,7 +46,7 @@ def __repr__(self) -> str: __str__ = __repr__ - def __getattr__(self, name): + def __getattr__(self, name: str): if name in self.self_dir: return object.__getattribute__(self, name)
Salvaging what I can from some abandoned branches.
https://api.github.com/repos/pandas-dev/pandas/pulls/30575
2019-12-30T23:01:39Z
2019-12-31T09:07:24Z
2019-12-31T09:07:24Z
2019-12-31T16:05:42Z
API: Raise when setting name via level
diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index 8223b831ebe2d..d6f5c0c758b60 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -565,19 +565,15 @@ When working with an ``Index`` object directly, rather than via a ``DataFrame``, mi2 = mi.rename("new name", level=0) mi2 -.. warning:: - Prior to pandas 1.0.0, you could also set the names of a ``MultiIndex`` - by updating the name of a level. +You cannot set the names of the MultiIndex via a level. - .. code-block:: none +.. ipython:: python + :okexcept: - >>> mi.levels[0].name = 'name via level' - >>> mi.names[0] # only works for older pandas - 'name via level' + mi.levels[0].name = "name via level" - As of pandas 1.0, this will *silently* fail to update the names - of the MultiIndex. Use :meth:`Index.set_names` instead. +Use :meth:`Index.set_names` instead. Sorting a ``MultiIndex`` ------------------------ diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 77c4ed6160dbe..a3ccf66334e3d 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -255,10 +255,10 @@ For backwards compatibility, you can still *access* the names via the levels. mi.levels[0].name However, it is no longer possible to *update* the names of the ``MultiIndex`` -via the name of the level. The following will **silently** fail to update the -name of the ``MultiIndex`` +via the level. .. ipython:: python + :okexcept: mi.levels[0].name = "new name" mi.names diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a3808f6f4a37e..fbbde715bc8a4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -240,6 +240,10 @@ def _outer_indexer(self, left, right): _data: Union[ExtensionArray, np.ndarray] _id = None _name: Optional[Hashable] = None + # MultiIndex.levels previously allowed setting the index name. 
We + # don't allow this anymore, and raise if it happens rather than + # failing silently. + _no_setting_name: bool = False _comparables = ["name"] _attributes = ["name"] _is_numeric_dtype = False @@ -1214,6 +1218,12 @@ def name(self): @name.setter def name(self, value): + if self._no_setting_name: + # Used in MultiIndex.levels to avoid silently ignoring name updates. + raise RuntimeError( + "Cannot set name on a level of a MultiIndex. Use " + "'MultiIndex.set_names' instead." + ) maybe_extract_name(value, None, type(self)) self._name = value diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index ba476f9e25ee6..579860f8557e7 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -253,6 +253,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): setattr(result, k, v) result._reset_identity() + result._no_setting_name = False return result # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9ff968bc554e4..f6123633338c1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -302,6 +302,7 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): result = object.__new__(cls) result._data = dtarr result.name = name + result._no_setting_name = False # For groupby perf. 
See note in indexes/base about _index_data result._index_data = dtarr._data result._reset_identity() diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 52df491725504..6d12f56151ba9 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -234,6 +234,7 @@ def _simple_new(cls, array, name, closed=None): result = IntervalMixin.__new__(cls) result._data = array result.name = name + result._no_setting_name = False result._reset_identity() return result diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index dac9b20104c36..360cd3fdbaa3f 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -628,6 +628,9 @@ def levels(self): result = [ x._shallow_copy(name=name) for x, name in zip(self._levels, self._names) ] + for level in result: + # disallow midx.levels[0].name = "foo" + level._no_setting_name = True return FrozenList(result) @property diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py index 5c3a48c9dd481..47f2ec4c8a418 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -124,3 +124,20 @@ def test_get_names_from_levels(): assert idx.levels[0].name == "a" assert idx.levels[1].name == "b" + + +def test_setting_names_from_levels_raises(): + idx = pd.MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[0].name = "foo" + + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[1].name = "foo" + + new = pd.Series(1, index=idx.levels[0]) + with pytest.raises(RuntimeError, match="set_names"): + new.index.name = "bar" + + assert pd.Index._no_setting_name is False + assert pd.Int64Index._no_setting_name is False + assert pd.RangeIndex._no_setting_name is False
Closes https://github.com/pandas-dev/pandas/issues/29032 This is extremely ugly, but gets the job done. It's probably worth doing to avoid silently failing, but not sure. cc @topper-123.
https://api.github.com/repos/pandas-dev/pandas/pulls/30574
2019-12-30T22:52:50Z
2020-01-03T02:16:40Z
2020-01-03T02:16:40Z
2020-01-03T02:16:43Z
TYP: check_untyped_defs io.json._normalize
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index aa14c3f3a63f3..c80b197214fc8 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -112,7 +112,7 @@ def nested_to_record( def _json_normalize( data: Union[Dict, List[Dict]], record_path: Optional[Union[str, List]] = None, - meta: Optional[Union[str, List]] = None, + meta: Optional[Union[str, List[Union[str, List[str]]]]] = None, meta_prefix: Optional[str] = None, record_prefix: Optional[str] = None, errors: Optional[str] = "raise", @@ -265,21 +265,21 @@ def _pull_field(js, spec): elif not isinstance(meta, list): meta = [meta] - meta = [m if isinstance(m, list) else [m] for m in meta] + _meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records: List = [] lengths = [] meta_vals: DefaultDict = defaultdict(list) - meta_keys = [sep.join(val) for val in meta] + meta_keys = [sep.join(val) for val in _meta] def _recursive_extract(data, path, seen_meta, level=0): if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: - for val, key in zip(meta, meta_keys): + for val, key in zip(_meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) @@ -296,7 +296,7 @@ def _recursive_extract(data, path, seen_meta, level=0): # For repeating the metadata later lengths.append(len(recs)) - for val, key in zip(meta, meta_keys): + for val, key in zip(_meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: diff --git a/setup.cfg b/setup.cfg index 84b3f9409b9ba..1a946fa03346b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -313,9 +313,6 @@ check_untyped_defs=False [mypy-pandas.io.json._json] check_untyped_defs=False -[mypy-pandas.io.json._normalize] -check_untyped_defs=False - [mypy-pandas.io.json._table_schema] check_untyped_defs=False
pandas\io\json\_normalize.py:282: error: Argument 1 to "zip" has incompatible type "Union[str, List[Any], None]"; expected "Iterable[Any]" pandas\io\json\_normalize.py:299: error: Argument 1 to "zip" has incompatible type "Union[str, List[Any], None]"; expected "Iterable[Any]"
https://api.github.com/repos/pandas-dev/pandas/pulls/30573
2019-12-30T22:34:50Z
2019-12-31T12:27:11Z
2019-12-31T12:27:11Z
2019-12-31T14:04:09Z
TYP: check_untyped_defs various
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index e8fd390456f82..0a1a1376bfc8d 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -169,9 +169,9 @@ def __new__(cls) -> "DataFrame": # type: ignore # our Unpickler sub-class to override methods and some dispatcher -# functions for compat - +# functions for compat and uses a non-public class of the pickle module. +# error: Name 'pkl._Unpickler' is not defined class Unpickler(pkl._Unpickler): # type: ignore def find_class(self, module, name): # override superclass diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index e3e0064c84da3..55de41794b30e 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -64,7 +64,7 @@ class SparseDtype(ExtensionDtype): # hash(nan) is (sometimes?) 0. _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") - def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: + def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None): if isinstance(dtype, type(self)): if fill_value is None: diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index ba0a4d81a88d3..afdd8a01ee003 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -300,14 +300,15 @@ def table_schema_cb(key): _enable_data_resource_formatter(cf.get_option(key)) -def is_terminal(): +def is_terminal() -> bool: """ Detect if Python is running in a terminal. Returns True if Python is running in a terminal or False if not. 
""" try: - ip = get_ipython() + # error: Name 'get_ipython' is not defined + ip = get_ipython() # type: ignore except NameError: # assume standard Python interpreter in a terminal return True else: diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index aa0f7d2aba1fc..4c3f8b7374465 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -4,7 +4,10 @@ # define abstract base classes to enable isinstance type checking on our # objects def create_pandas_abc_type(name, attr, comp): - @classmethod + + # https://github.com/python/mypy/issues/1006 + # error: 'classmethod' used with a non-method + @classmethod # type: ignore def _check(cls, inst) -> bool: return getattr(inst, attr, "_typ") in comp diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 227547daf3668..ab4c0b819efe5 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2528,9 +2528,9 @@ def get_groupby( squeeze: bool = False, observed: bool = False, mutated: bool = False, -): +) -> GroupBy: - klass: Union[Type["SeriesGroupBy"], Type["DataFrameGroupBy"]] + klass: Type[GroupBy] if isinstance(obj, Series): from pandas.core.groupby.generic import SeriesGroupBy diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index ba476f9e25ee6..531014e4affec 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -1,5 +1,5 @@ import operator -from typing import Any +from typing import Any, List import numpy as np @@ -583,6 +583,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): target = ibase.ensure_index(target) + missing: List[int] if self.equals(target): indexer = None missing = [] diff --git a/setup.cfg b/setup.cfg index 84b3f9409b9ba..5d27d94015862 100644 --- a/setup.cfg +++ b/setup.cfg @@ -181,15 +181,9 @@ check_untyped_defs=False [mypy-pandas.core.computation.scope] check_untyped_defs=False -[mypy-pandas.core.config_init] 
-check_untyped_defs=False - [mypy-pandas.core.dtypes.cast] check_untyped_defs=False -[mypy-pandas.core.dtypes.generic] -check_untyped_defs=False - [mypy-pandas.core.frame] check_untyped_defs=False @@ -208,9 +202,6 @@ check_untyped_defs=False [mypy-pandas.core.indexes.base] check_untyped_defs=False -[mypy-pandas.core.indexes.category] -check_untyped_defs=False - [mypy-pandas.core.indexes.datetimelike] check_untyped_defs=False @@ -256,9 +247,6 @@ check_untyped_defs=False [mypy-pandas.core.reshape.reshape] check_untyped_defs=False -[mypy-pandas.core.series] -check_untyped_defs=False - [mypy-pandas.core.strings] check_untyped_defs=False
pandas\core\dtypes\generic.py:7: error: 'classmethod' used with a non-method pandas\core\config_init.py:310: error: Name 'get_ipython' is not defined pandas\core\indexes\category.py:588: error: Need type annotation for 'missing' (hint: "missing: List[<type>] = ...")
https://api.github.com/repos/pandas-dev/pandas/pulls/30572
2019-12-30T22:07:17Z
2019-12-31T12:27:26Z
2019-12-31T12:27:25Z
2019-12-31T14:03:19Z
PERF: Fixed performance regression in Series init
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 1dda51da49ffb..4a06ea9500770 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -276,10 +276,12 @@ def is_dtype(cls, dtype) -> bool: return False elif isinstance(dtype, cls): return True - try: - return cls.construct_from_string(dtype) is not None - except TypeError: - return False + if isinstance(dtype, str): + try: + return cls.construct_from_string(dtype) is not None + except TypeError: + return False + return False @property def _is_numeric(self) -> bool: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index b77cd34700f10..226ab7b6057fd 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -882,7 +882,11 @@ def construct_from_string(cls, string): return cls(freq=string) except ValueError: pass - raise TypeError(f"Cannot construct a 'PeriodDtype' from '{string}'") + if isinstance(string, str): + msg = f"Cannot construct a 'PeriodDtype' from '{string}'" + else: + msg = f"'construct_from_string' expects a string, got {type(string)}" + raise TypeError(msg) def __str__(self) -> str_type: return self.name diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 4dee6e3e92a7f..d984e97579007 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -408,6 +408,9 @@ def test_construction_from_string(self): with pytest.raises(TypeError): PeriodDtype.construct_from_string("datetime64[ns, US/Eastern]") + with pytest.raises(TypeError, match="list"): + PeriodDtype.construct_from_string([1, 2, 3]) + def test_is_dtype(self): assert PeriodDtype.is_dtype(self.dtype) assert PeriodDtype.is_dtype("period[D]") diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index 9a442f346c19f..74a60ce1a7048 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -38,6 +38,9 @@ def 
test_is_dtype_from_self(self, dtype): result = type(dtype).is_dtype(dtype) assert result is True + def test_is_dtype_other_input(self, dtype): + assert dtype.is_dtype([1, 2, 3]) is False + def test_is_not_string_type(self, dtype): return not pd.api.types.is_string_dtype(dtype)
Closes https://github.com/pandas-dev/pandas/issues/30564 PR ``` [ 50.00%] ··· series_methods.SeriesConstructor.time_constructor ok [ 50.00%] ··· ====== ========== data ------ ---------- None 670±0μs dict 84.4±0ms ====== ========== ``` master ``` [ 50.00%] ··· series_methods.SeriesConstructor.time_constructor ok [ 50.00%] ··· ====== ========= data ------ --------- None 756±0μs dict 725±0ms ====== ========= ``` No whatsnew, since the regression was only on master.
https://api.github.com/repos/pandas-dev/pandas/pulls/30571
2019-12-30T21:30:14Z
2019-12-31T16:12:03Z
2019-12-31T16:12:02Z
2019-12-31T16:12:10Z
CLN: refactor tests in test_moments_ewm.py
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py index c3648bc619c50..e1f322622de48 100644 --- a/pandas/tests/window/common.py +++ b/pandas/tests/window/common.py @@ -348,3 +348,34 @@ def get_result(obj, obj2=None): result.index = result.index.droplevel(1) expected = get_result(self.frame[1], self.frame[5]) tm.assert_series_equal(result, expected, check_names=False) + + +def ew_func(A, B, com, name, **kwargs): + return getattr(A.ewm(com, **kwargs), name)(B) + + +def check_binary_ew(name, A, B): + + result = ew_func(A=A, B=B, com=20, name=name, min_periods=5) + assert np.isnan(result.values[:14]).all() + assert not np.isnan(result.values[14:]).any() + + +def check_binary_ew_min_periods(name, min_periods, A, B): + # GH 7898 + result = ew_func(A, B, 20, name=name, min_periods=min_periods) + # binary functions (ewmcov, ewmcorr) with bias=False require at + # least two values + assert np.isnan(result.values[:11]).all() + assert not np.isnan(result.values[11:]).any() + + # check series of length 0 + empty = Series([], dtype=np.float64) + result = ew_func(empty, empty, 50, name=name, min_periods=min_periods) + tm.assert_series_equal(result, empty) + + # check series of length 1 + result = ew_func( + Series([1.0]), Series([1.0]), 50, name=name, min_periods=min_periods + ) + tm.assert_series_equal(result, Series([np.NaN])) diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py new file mode 100644 index 0000000000000..2002f4d0bff43 --- /dev/null +++ b/pandas/tests/window/moments/conftest.py @@ -0,0 +1,20 @@ +import numpy as np +from numpy.random import randn +import pytest + +from pandas import Series + + +@pytest.fixture +def binary_ew_data(): + A = Series(randn(50), index=np.arange(50)) + B = A[2:] + randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN + return A, B + + +@pytest.fixture(params=[0, 1, 2]) +def min_periods(request): + return request.param diff --git 
a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index bf2bd1420b7f4..46cd503e95e11 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -4,7 +4,13 @@ import pandas as pd from pandas import DataFrame, Series, concat -from pandas.tests.window.common import Base, ConsistencyBase +from pandas.tests.window.common import ( + Base, + ConsistencyBase, + check_binary_ew, + check_binary_ew_min_periods, + ew_func, +) import pandas.util.testing as tm @@ -216,6 +222,9 @@ def _check_ew(self, name=None, preserve_nan=False): if preserve_nan: assert result[self._nan_locs].isna().all() + @pytest.mark.parametrize("min_periods", [0, 1]) + @pytest.mark.parametrize("name", ["mean", "var", "vol"]) + def test_ew_min_periods(self, min_periods, name): # excluding NaNs correctly arr = randn(50) arr[:10] = np.NaN @@ -228,31 +237,30 @@ def _check_ew(self, name=None, preserve_nan=False): assert result[:11].isna().all() assert not result[11:].isna().any() - for min_periods in (0, 1): - result = getattr(s.ewm(com=50, min_periods=min_periods), name)() - if name == "mean": - assert result[:10].isna().all() - assert not result[10:].isna().any() - else: - # ewm.std, ewm.vol, ewm.var (with bias=False) require at least - # two values - assert result[:11].isna().all() - assert not result[11:].isna().any() - - # check series of length 0 - result = getattr( - Series(dtype=object).ewm(com=50, min_periods=min_periods), name - )() - tm.assert_series_equal(result, Series(dtype="float64")) - - # check series of length 1 - result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() - if name == "mean": - tm.assert_series_equal(result, Series([1.0])) - else: - # ewm.std, ewm.vol, ewm.var with bias=False require at least - # two values - tm.assert_series_equal(result, Series([np.NaN])) + result = getattr(s.ewm(com=50, min_periods=min_periods), name)() + if name == "mean": + assert 
result[:10].isna().all() + assert not result[10:].isna().any() + else: + # ewm.std, ewm.vol, ewm.var (with bias=False) require at least + # two values + assert result[:11].isna().all() + assert not result[11:].isna().any() + + # check series of length 0 + result = getattr( + Series(dtype=object).ewm(com=50, min_periods=min_periods), name + )() + tm.assert_series_equal(result, Series(dtype="float64")) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() + if name == "mean": + tm.assert_series_equal(result, Series([1.0])) + else: + # ewm.std, ewm.vol, ewm.var with bias=False require at least + # two values + tm.assert_series_equal(result, Series([np.NaN])) # pass in ints result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() @@ -263,53 +271,27 @@ class TestEwmMomentsConsistency(ConsistencyBase): def setup_method(self, method): self._create_data() - def test_ewmcov(self): - self._check_binary_ew("cov") - def test_ewmcov_pairwise(self): self._check_pairwise_moment("ewm", "cov", span=10, min_periods=5) - def test_ewmcorr(self): - self._check_binary_ew("corr") + @pytest.mark.parametrize("name", ["cov", "corr"]) + def test_ewm_corr_cov(self, name, min_periods, binary_ew_data): + A, B = binary_ew_data + + check_binary_ew(name="corr", A=A, B=B) + check_binary_ew_min_periods("corr", min_periods, A, B) def test_ewmcorr_pairwise(self): self._check_pairwise_moment("ewm", "corr", span=10, min_periods=5) - def _check_binary_ew(self, name): - def func(A, B, com, **kwargs): - return getattr(A.ewm(com, **kwargs), name)(B) - - A = Series(randn(50), index=np.arange(50)) - B = A[2:] + randn(48) - - A[:10] = np.NaN - B[-10:] = np.NaN - - result = func(A, B, 20, min_periods=5) - assert np.isnan(result.values[:14]).all() - assert not np.isnan(result.values[14:]).any() - - # GH 7898 - for min_periods in (0, 1, 2): - result = func(A, B, 20, min_periods=min_periods) - # binary functions (ewmcov, ewmcorr) with bias=False require at - 
# least two values - assert np.isnan(result.values[:11]).all() - assert not np.isnan(result.values[11:]).any() - - # check series of length 0 - empty = Series([], dtype=np.float64) - result = func(empty, empty, 50, min_periods=min_periods) - tm.assert_series_equal(result, empty) - - # check series of length 1 - result = func(Series([1.0]), Series([1.0]), 50, min_periods=min_periods) - tm.assert_series_equal(result, Series([np.NaN])) + @pytest.mark.parametrize("name", ["cov", "corr"]) + def test_different_input_array_raise_exception(self, name, binary_ew_data): + A, _ = binary_ew_data msg = "Input arrays must be of the same type!" # exception raised is Exception with pytest.raises(Exception, match=msg): - func(A, randn(50), 20, min_periods=5) + ew_func(A, randn(50), 20, name=name, min_periods=5) @pytest.mark.slow @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
- [ ] xref: #30486 #30542 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30570
2019-12-30T21:28:36Z
2020-01-03T12:45:58Z
2020-01-03T12:45:58Z
2020-01-03T13:26:37Z
BUG: Change IntervalDtype.kind from None to "O"
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 8755abe642068..cfbe57be379ed 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -843,6 +843,7 @@ Interval - Bug in :meth:`IntervalIndex.get_indexer` where a :class:`Categorical` or :class:`CategoricalIndex` ``target`` would incorrectly raise a ``TypeError`` (:issue:`30063`) - Bug in ``pandas.core.dtypes.cast.infer_dtype_from_scalar`` where passing ``pandas_dtype=True`` did not infer :class:`IntervalDtype` (:issue:`30337`) +- Bug in :class:`IntervalDtype` where the ``kind`` attribute was incorrectly set as ``None`` instead of ``"O"`` (:issue:`30568`) Indexing ^^^^^^^^ diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index dc22a79a2f3fe..1fae25de45423 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -633,7 +633,14 @@ def is_string_dtype(arr_or_dtype) -> bool: # TODO: gh-15585: consider making the checks stricter. def condition(dtype) -> bool: - return dtype.kind in ("O", "S", "U") and not is_period_dtype(dtype) + return dtype.kind in ("O", "S", "U") and not is_excluded_dtype(dtype) + + def is_excluded_dtype(dtype) -> bool: + """ + These have kind = "O" but aren't string dtypes so need to be explicitly excluded + """ + is_excluded_checks = (is_period_dtype, is_interval_dtype) + return any(is_excluded(dtype) for is_excluded in is_excluded_checks) return _is_dtype(arr_or_dtype, condition) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index b77cd34700f10..3763d475ecb71 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -974,7 +974,7 @@ class IntervalDtype(PandasExtensionDtype): """ name = "interval" - kind: Optional[str_type] = None + kind: str_type = "O" str = "|O08" base = np.dtype("O") num = 103 diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 4dee6e3e92a7f..82ccbf9d954f5 100644 --- 
a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -685,6 +685,10 @@ def test_caching(self): tm.round_trip_pickle(dtype) assert len(IntervalDtype._cache) == 0 + def test_not_string(self): + # GH30568: though IntervalDtype has object kind, it cannot be string + assert not is_string_dtype(IntervalDtype()) + class TestCategoricalDtypeParametrized: @pytest.mark.parametrize( diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index 9a442f346c19f..cbeda0f20f262 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -16,8 +16,7 @@ def test_name(self, dtype): def test_kind(self, dtype): valid = set("biufcmMOSUV") - if dtype.kind is not None: - assert dtype.kind in valid + assert dtype.kind in valid def test_construct_from_string_own_name(self, dtype): result = dtype.construct_from_string(dtype.name)
- [X] closes #30568 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry This is partially blocking #28399 as tests are failing there due to `kind` being `None`. cc @TomAugspurger
https://api.github.com/repos/pandas-dev/pandas/pulls/30569
2019-12-30T21:08:55Z
2019-12-31T12:26:14Z
2019-12-31T12:26:14Z
2019-12-31T17:20:18Z
CLN: Clean test moments for expanding
diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py index 3361ecab28669..117c6eda0d6c8 100644 --- a/pandas/tests/window/moments/test_moments_expanding.py +++ b/pandas/tests/window/moments/test_moments_expanding.py @@ -173,19 +173,24 @@ def test_expanding_corr_pairwise_diff_length(self): tm.assert_frame_equal(result3, expected) tm.assert_frame_equal(result4, expected) + @pytest.mark.parametrize("has_min_periods", [True, False]) @pytest.mark.parametrize( "func,static_comp", [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], ids=["sum", "mean", "max", "min"], ) - def test_expanding_func(self, func, static_comp): + def test_expanding_func(self, func, static_comp, has_min_periods): def expanding_func(x, min_periods=1, center=False, axis=0): exp = x.expanding(min_periods=min_periods, center=center, axis=axis) return getattr(exp, func)() self._check_expanding(expanding_func, static_comp, preserve_nan=False) + self._check_expanding_has_min_periods( + expanding_func, static_comp, has_min_periods + ) - def test_expanding_apply(self, raw): + @pytest.mark.parametrize("has_min_periods", [True, False]) + def test_expanding_apply(self, raw, has_min_periods): def expanding_mean(x, min_periods=1): exp = x.expanding(min_periods=min_periods) @@ -195,19 +200,20 @@ def expanding_mean(x, min_periods=1): # TODO(jreback), needed to add preserve_nan=False # here to make this pass self._check_expanding(expanding_mean, np.mean, preserve_nan=False) + self._check_expanding_has_min_periods(expanding_mean, np.mean, has_min_periods) + def test_expanding_apply_empty_series(self, raw): ser = Series([], dtype=np.float64) tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean(), raw=raw)) + def test_expanding_apply_min_periods_0(self, raw): # GH 8080 s = Series([None, None, None]) result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw) expected = Series([1.0, 2.0, 3.0]) 
tm.assert_series_equal(result, expected) - def _check_expanding( - self, func, static_comp, has_min_periods=True, preserve_nan=True - ): + def _check_expanding(self, func, static_comp, preserve_nan=True): series_result = func(self.series) assert isinstance(series_result, Series) @@ -220,6 +226,7 @@ def _check_expanding( if preserve_nan: assert result.iloc[self._nan_locs].isna().all() + def _check_expanding_has_min_periods(self, func, static_comp, has_min_periods): ser = Series(randn(50)) if has_min_periods: @@ -245,17 +252,9 @@ def _check_expanding( result = func(ser) tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - def test_moment_functions_zero_length(self): - # GH 8056 - s = Series(dtype=np.float64) - s_expected = s - df1 = DataFrame() - df1_expected = df1 - df2 = DataFrame(columns=["a"]) - df2["a"] = df2["a"].astype("float64") - df2_expected = df2 - - functions = [ + @pytest.mark.parametrize( + "f", + [ lambda x: x.expanding().count(), lambda x: x.expanding(min_periods=5).cov(x, pairwise=False), lambda x: x.expanding(min_periods=5).corr(x, pairwise=False), @@ -271,23 +270,35 @@ def test_moment_functions_zero_length(self): lambda x: x.expanding(min_periods=5).median(), lambda x: x.expanding(min_periods=5).apply(sum, raw=False), lambda x: x.expanding(min_periods=5).apply(sum, raw=True), - ] - for f in functions: - try: - s_result = f(s) - tm.assert_series_equal(s_result, s_expected) + ], + ) + def test_moment_functions_zero_length(self, f): + # GH 8056 + s = Series(dtype=np.float64) + s_expected = s + df1 = DataFrame() + df1_expected = df1 + df2 = DataFrame(columns=["a"]) + df2["a"] = df2["a"].astype("float64") + df2_expected = df2 - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) - except (ImportError): + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) - # scipy needed 
for rolling_window - continue + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) - def test_moment_functions_zero_length_pairwise(self): + @pytest.mark.parametrize( + "f", + [ + lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)), + lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)), + ], + ) + def test_moment_functions_zero_length_pairwise(self, f): df1 = DataFrame() df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) @@ -303,16 +314,12 @@ def test_moment_functions_zero_length_pairwise(self): columns=Index(["a"], name="foo"), dtype="float64", ) - functions = [ - lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)), - lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)), - ] - for f in functions: - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) @pytest.mark.slow @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
- xref #30486 #30542 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30566
2019-12-30T20:51:29Z
2020-01-01T02:36:20Z
2020-01-01T02:36:20Z
2020-01-01T02:36:27Z
TYP: Add return types to some top-level func
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 108e24ffee820..9ff968bc554e4 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1356,7 +1356,7 @@ def date_range( name=None, closed=None, **kwargs, -): +) -> DatetimeIndex: """ Return a fixed frequency DatetimeIndex. @@ -1522,7 +1522,7 @@ def bdate_range( holidays=None, closed=None, **kwargs, -): +) -> DatetimeIndex: """ Return a fixed frequency DatetimeIndex, with business day as the default frequency. diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 37ec05c40940e..7a22a6c846240 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -68,7 +68,7 @@ def merge( copy: bool = True, indicator: bool = False, validate=None, -): +) -> "DataFrame": op = _MergeOperation( left, right, @@ -183,7 +183,7 @@ def merge_ordered( fill_method=None, suffixes=("_x", "_y"), how: str = "outer", -): +) -> "DataFrame": """ Perform merge with optional filling/interpolation. @@ -317,7 +317,7 @@ def merge_asof( tolerance=None, allow_exact_matches: bool = True, direction: str = "backward", -): +) -> "DataFrame": """ Perform an asof merge. This is similar to a left-join except that we match on nearest key rather than equal keys. 
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 4b21045cd0217..2eb2990bd58c4 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -35,7 +35,7 @@ def pivot_table( dropna=True, margins_name="All", observed=False, -): +) -> "DataFrame": index = _convert_by(index) columns = _convert_by(columns) @@ -148,7 +148,9 @@ def pivot_table( table = table.sort_index(axis=1) if fill_value is not None: - table = table.fillna(value=fill_value, downcast="infer") + filled = table.fillna(value=fill_value, downcast="infer") + assert filled is not None # needed for mypy + table = filled if margins: if dropna: @@ -426,7 +428,7 @@ def _convert_by(by): @Substitution("\ndata : DataFrame") @Appender(_shared_docs["pivot"], indents=1) -def pivot(data: "DataFrame", index=None, columns=None, values=None): +def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFrame": if values is None: cols = [columns] if index is None else [index, columns] append = index is None diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index aa14c3f3a63f3..6f2e6e844f8e5 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -118,7 +118,7 @@ def _json_normalize( errors: Optional[str] = "raise", sep: str = ".", max_level: Optional[int] = None, -): +) -> "DataFrame": """ Normalize semi-structured JSON data into a flat table.
Adds return type hints to some top-level funcs. I will take the rest over 1-2 more PRs. Having return types typed up will be good for the user experience (better piping etc.).
https://api.github.com/repos/pandas-dev/pandas/pulls/30565
2019-12-30T20:49:30Z
2019-12-31T09:08:04Z
2019-12-31T09:08:04Z
2019-12-31T12:37:14Z
ENH: Allow absolute precision in assert_almost_equal (#13357)
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 567b6853bd633..622e7be4ba831 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -781,6 +781,9 @@ Deprecations - The ``squeeze`` keyword in the ``groupby`` function is deprecated and will be removed in a future version (:issue:`32380`) - The ``tz`` keyword in :meth:`Period.to_timestamp` is deprecated and will be removed in a future version; use `per.to_timestamp(...).tz_localize(tz)`` instead (:issue:`34522`) - :meth:`DatetimeIndex.to_perioddelta` is deprecated and will be removed in a future version. Use ``index - index.to_period(freq).to_timestamp()`` instead (:issue:`34853`) +- :meth:`util.testing.assert_almost_equal` now accepts both relative and absolute + precision through the ``rtol``, and ``atol`` parameters, thus deprecating the + ``check_less_precise`` parameter. (:issue:`13357`). .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx index ca18afebf410b..785a4d1f8b923 100644 --- a/pandas/_libs/testing.pyx +++ b/pandas/_libs/testing.pyx @@ -1,3 +1,5 @@ +import math + import numpy as np from numpy cimport import_array import_array() @@ -42,12 +44,6 @@ cdef bint is_dictlike(obj): return hasattr(obj, 'keys') and hasattr(obj, '__getitem__') -cdef bint decimal_almost_equal(double desired, double actual, int decimal): - # Code from - # https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_almost_equal.html - return abs(desired - actual) < (0.5 * 10.0 ** -decimal) - - cpdef assert_dict_equal(a, b, bint compare_keys=True): assert is_dictlike(a) and is_dictlike(b), ( "Cannot compare dict objects, one or both is not dict-like" @@ -66,7 +62,7 @@ cpdef assert_dict_equal(a, b, bint compare_keys=True): cpdef assert_almost_equal(a, b, - check_less_precise=False, + rtol=1.e-5, atol=1.e-8, bint check_dtype=True, obj=None, lobj=None, robj=None, 
index_values=None): """ @@ -76,31 +72,33 @@ cpdef assert_almost_equal(a, b, ---------- a : object b : object - check_less_precise : bool or int, default False - Specify comparison precision. - 5 digits (False) or 3 digits (True) after decimal points are - compared. If an integer, then this will be the number of decimal - points to compare + rtol : float, default 1e-5 + Relative tolerance. + + .. versionadded:: 1.1.0 + atol : float, default 1e-8 + Absolute tolerance. + + .. versionadded:: 1.1.0 check_dtype: bool, default True - check dtype if both a and b are np.ndarray + check dtype if both a and b are np.ndarray. obj : str, default None Specify object name being compared, internally used to show - appropriate assertion message + appropriate assertion message. lobj : str, default None Specify left object name being compared, internally used to show - appropriate assertion message + appropriate assertion message. robj : str, default None Specify right object name being compared, internally used to show - appropriate assertion message + appropriate assertion message. index_values : ndarray, default None Specify shared index values of objects being compared, internally used - to show appropriate assertion message + to show appropriate assertion message. .. 
versionadded:: 1.1.0 """ cdef: - int decimal double diff = 0.0 Py_ssize_t i, na, nb double fa, fb @@ -111,8 +109,6 @@ cpdef assert_almost_equal(a, b, if robj is None: robj = b - assert isinstance(check_less_precise, (int, bool)) - if isinstance(a, dict) or isinstance(b, dict): return assert_dict_equal(a, b) @@ -170,8 +166,7 @@ cpdef assert_almost_equal(a, b, for i in range(len(a)): try: - assert_almost_equal(a[i], b[i], - check_less_precise=check_less_precise) + assert_almost_equal(a[i], b[i], rtol=rtol, atol=atol) except AssertionError: is_unequal = True diff += 1 @@ -203,24 +198,11 @@ cpdef assert_almost_equal(a, b, # inf comparison return True - if check_less_precise is True: - decimal = 3 - elif check_less_precise is False: - decimal = 5 - else: - decimal = check_less_precise - fa, fb = a, b - # case for zero - if abs(fa) < 1e-5: - if not decimal_almost_equal(fa, fb, decimal): - assert False, (f'(very low values) expected {fb:.5f} ' - f'but got {fa:.5f}, with decimal {decimal}') - else: - if not decimal_almost_equal(1, fb / fa, decimal): - assert False, (f'expected {fb:.5f} but got {fa:.5f}, ' - f'with decimal {decimal}') + if not math.isclose(fa, fb, rel_tol=rtol, abs_tol=atol): + assert False, (f"expected {fb:.5f} but got {fa:.5f}, " + f"with rtol={rtol}, atol={atol}") return True raise AssertionError(f"{a} != {b}") diff --git a/pandas/_testing.py b/pandas/_testing.py index ebb53dd81682c..fc6df7a95e348 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -22,6 +22,7 @@ set_locale, ) +from pandas._libs.lib import no_default import pandas._libs.testing as _testing from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries from pandas.compat import _get_lzma_file, _import_lzma @@ -64,6 +65,7 @@ TimedeltaArray, period_array, ) +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing @@ -303,11 +305,54 @@ def write_to_compressed(compression, path, 
data, dest="test"): getattr(f, method)(*args) +def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float: + """ + Return the tolerance equivalent to the deprecated `check_less_precise` + parameter. + + Parameters + ---------- + check_less_precise : bool or int + + Returns + ------- + float + Tolerance to be used as relative/absolute tolerance. + + Examples + -------- + >>> # Using check_less_precise as a bool: + >>> _get_tol_from_less_precise(False) + 0.5e-5 + >>> _get_tol_from_less_precise(True) + 0.5e-3 + >>> # Using check_less_precise as an int representing the decimal + >>> # tolerance intended: + >>> _get_tol_from_less_precise(2) + 0.5e-2 + >>> _get_tol_from_less_precise(8) + 0.5e-8 + + """ + if isinstance(check_less_precise, bool): + if check_less_precise: + # 3-digit tolerance + return 0.5e-3 + else: + # 5-digit tolerance + return 0.5e-5 + else: + # Equivalent to setting checking_less_precise=<decimals> + return 0.5 * 10 ** -check_less_precise + + def assert_almost_equal( left, right, check_dtype: Union[bool, str] = "equiv", - check_less_precise: Union[bool, int] = False, + check_less_precise: Union[bool, int] = no_default, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, **kwargs, ): """ @@ -334,14 +379,37 @@ def assert_almost_equal( they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. + + .. deprecated:: 1.1.0 + Use `rtol` and `atol` instead to define relative/absolute + tolerance, respectively. Similar to :func:`math.isclose`. + rtol : float, default 1e-5 + Relative tolerance. + + .. versionadded:: 1.1.0 + atol : float, default 1e-8 + Absolute tolerance. + + .. versionadded:: 1.1.0 """ + if check_less_precise is not no_default: + warnings.warn( + "The 'check_less_precise' keyword in testing.assert_*_equal " + "is deprecated and will be removed in a future version. 
" + "You can stop passing 'check_less_precise' to silence this warning.", + FutureWarning, + stacklevel=2, + ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) + if isinstance(left, pd.Index): assert_index_equal( left, right, check_exact=False, exact=check_dtype, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, **kwargs, ) @@ -351,7 +419,8 @@ def assert_almost_equal( right, check_exact=False, check_dtype=check_dtype, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, **kwargs, ) @@ -361,7 +430,8 @@ def assert_almost_equal( right, check_exact=False, check_dtype=check_dtype, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, **kwargs, ) @@ -381,11 +451,7 @@ def assert_almost_equal( obj = "Input" assert_class_equal(left, right, obj=obj) _testing.assert_almost_equal( - left, - right, - check_dtype=check_dtype, - check_less_precise=check_less_precise, - **kwargs, + left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs ) @@ -596,9 +662,11 @@ def assert_index_equal( right: Index, exact: Union[bool, str] = "equiv", check_names: bool = True, - check_less_precise: Union[bool, int] = False, + check_less_precise: Union[bool, int] = no_default, check_exact: bool = True, check_categorical: bool = True, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, obj: str = "Index", ) -> None: """ @@ -618,10 +686,22 @@ def assert_index_equal( Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare. + + .. deprecated:: 1.1.0 + Use `rtol` and `atol` instead to define relative/absolute + tolerance, respectively. Similar to :func:`math.isclose`. check_exact : bool, default True Whether to compare number exactly. check_categorical : bool, default True Whether to compare internal Categorical exactly. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + + .. 
versionadded:: 1.1.0 + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + + .. versionadded:: 1.1.0 obj : str, default 'Index' Specify object name being compared, internally used to show appropriate assertion message. @@ -650,6 +730,16 @@ def _get_ilevel_values(index, level): values = unique._shallow_copy(filled, name=index.names[level]) return values + if check_less_precise is not no_default: + warnings.warn( + "The 'check_less_precise' keyword in testing.assert_*_equal " + "is deprecated and will be removed in a future version. " + "You can stop passing 'check_less_precise' to silence this warning.", + FutureWarning, + stacklevel=2, + ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) + # instance validation _check_isinstance(left, right, Index) @@ -686,8 +776,9 @@ def _get_ilevel_values(index, level): rlevel, exact=exact, check_names=check_names, - check_less_precise=check_less_precise, check_exact=check_exact, + rtol=rtol, + atol=atol, obj=lobj, ) # get_level_values may change dtype @@ -703,7 +794,8 @@ def _get_ilevel_values(index, level): _testing.assert_almost_equal( left.values, right.values, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, check_dtype=exact, obj=obj, lobj=left, @@ -1028,9 +1120,11 @@ def assert_extension_array_equal( left, right, check_dtype=True, - check_less_precise=False, - check_exact=False, index_values=None, + check_less_precise=no_default, + check_exact=False, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, ): """ Check that left and right ExtensionArrays are equal. @@ -1041,14 +1135,26 @@ def assert_extension_array_equal( The two arrays to compare. check_dtype : bool, default True Whether to check if the ExtensionArray dtypes are identical. + index_values : numpy.ndarray, default None + Optional index (shared by both left and right), used in output. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 
5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare. + + .. deprecated:: 1.1.0 + Use `rtol` and `atol` instead to define relative/absolute + tolerance, respectively. Similar to :func:`math.isclose`. check_exact : bool, default False Whether to compare number exactly. - index_values : numpy.ndarray, default None - Optional index (shared by both left and right), used in output. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + + .. versionadded:: 1.1.0 + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + + .. versionadded:: 1.1.0 Notes ----- @@ -1056,12 +1162,26 @@ def assert_extension_array_equal( A mask of missing values is computed for each and checked to match. The remaining all-valid values are cast to object dtype and checked. """ + if check_less_precise is not no_default: + warnings.warn( + "The 'check_less_precise' keyword in testing.assert_*_equal " + "is deprecated and will be removed in a future version. 
" + "You can stop passing 'check_less_precise' to silence this warning.", + FutureWarning, + stacklevel=2, + ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) + assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" if check_dtype: assert_attr_equal("dtype", left, right, obj="ExtensionArray") - if hasattr(left, "asi8") and type(right) == type(left): + if ( + isinstance(left, DatetimeLikeArrayMixin) + and isinstance(right, DatetimeLikeArrayMixin) + and type(right) == type(left) + ): # Avoid slow object-dtype comparisons # np.asarray for case where we have a np.MaskedArray assert_numpy_array_equal( @@ -1086,7 +1206,8 @@ def assert_extension_array_equal( left_valid, right_valid, check_dtype=check_dtype, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, obj="ExtensionArray", index_values=index_values, ) @@ -1099,13 +1220,15 @@ def assert_series_equal( check_dtype=True, check_index_type="equiv", check_series_type=True, - check_less_precise=False, + check_less_precise=no_default, check_names=True, check_exact=False, check_datetimelike_compat=False, check_categorical=True, check_category_order=True, check_freq=True, + rtol=1.0e-5, + atol=1.0e-8, obj="Series", ): """ @@ -1132,6 +1255,10 @@ def assert_series_equal( they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. + + .. deprecated:: 1.1.0 + Use `rtol` and `atol` instead to define relative/absolute + tolerance, respectively. Similar to :func:`math.isclose`. check_names : bool, default True Whether to check the Series and Index names attribute. check_exact : bool, default False @@ -1146,6 +1273,12 @@ def assert_series_equal( .. versionadded:: 1.0.2 check_freq : bool, default True Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. 
+ rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + + .. versionadded:: 1.1.0 + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. .. versionadded:: 1.1.0 obj : str, default 'Series' @@ -1154,6 +1287,16 @@ def assert_series_equal( """ __tracebackhide__ = True + if check_less_precise is not no_default: + warnings.warn( + "The 'check_less_precise' keyword in testing.assert_*_equal " + "is deprecated and will be removed in a future version. " + "You can stop passing 'check_less_precise' to silence this warning.", + FutureWarning, + stacklevel=2, + ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) + # instance validation _check_isinstance(left, right, Series) @@ -1172,9 +1315,10 @@ def assert_series_equal( right.index, exact=check_index_type, check_names=check_names, - check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, + rtol=rtol, + atol=atol, obj=f"{obj}.index", ) if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)): @@ -1227,7 +1371,8 @@ def assert_series_equal( _testing.assert_almost_equal( left._values, right._values, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, check_dtype=check_dtype, obj=str(obj), index_values=np.asarray(left.index), @@ -1245,7 +1390,8 @@ def assert_series_equal( _testing.assert_almost_equal( left._values, right._values, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, check_dtype=check_dtype, obj=str(obj), index_values=np.asarray(left.index), @@ -1273,7 +1419,7 @@ def assert_frame_equal( check_index_type="equiv", check_column_type="equiv", check_frame_type=True, - check_less_precise=False, + check_less_precise=no_default, check_names=True, by_blocks=False, check_exact=False, @@ -1281,6 +1427,8 @@ def assert_frame_equal( check_categorical=True, check_like=False, check_freq=True, + rtol=1.0e-5, + atol=1.0e-8, obj="DataFrame", ): """ @@ -1318,6 
+1466,10 @@ def assert_frame_equal( they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. + + .. deprecated:: 1.1.0 + Use `rtol` and `atol` instead to define relative/absolute + tolerance, respectively. Similar to :func:`math.isclose`. check_names : bool, default True Whether to check that the `names` attribute for both the `index` and `column` attributes of the DataFrame is identical. @@ -1336,6 +1488,12 @@ def assert_frame_equal( (same as in columns) - same labels must be with the same data. check_freq : bool, default True Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + + .. versionadded:: 1.1.0 + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. .. versionadded:: 1.1.0 obj : str, default 'DataFrame' @@ -1377,6 +1535,16 @@ def assert_frame_equal( """ __tracebackhide__ = True + if check_less_precise is not no_default: + warnings.warn( + "The 'check_less_precise' keyword in testing.assert_*_equal " + "is deprecated and will be removed in a future version. 
" + "You can stop passing 'check_less_precise' to silence this warning.", + FutureWarning, + stacklevel=2, + ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) + # instance validation _check_isinstance(left, right, DataFrame) @@ -1399,9 +1567,10 @@ def assert_frame_equal( right.index, exact=check_index_type, check_names=check_names, - check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, + rtol=rtol, + atol=atol, obj=f"{obj}.index", ) @@ -1411,9 +1580,10 @@ def assert_frame_equal( right.columns, exact=check_column_type, check_names=check_names, - check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, + rtol=rtol, + atol=atol, obj=f"{obj}.columns", ) @@ -1439,13 +1609,14 @@ def assert_frame_equal( rcol, check_dtype=check_dtype, check_index_type=check_index_type, - check_less_precise=check_less_precise, check_exact=check_exact, check_names=check_names, check_datetimelike_compat=check_datetimelike_compat, check_categorical=check_categorical, check_freq=check_freq, obj=f'{obj}.iloc[:, {i}] (column name="{col}")', + rtol=rtol, + atol=atol, ) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index db21161f84cf7..db8bb5ca3c437 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -32,7 +32,8 @@ def assert_stat_op_calc( has_skipna=True, check_dtype=True, check_dates=False, - check_less_precise=False, + rtol=1e-5, + atol=1e-8, skipna_alternative=None, ): """ @@ -54,9 +55,10 @@ def assert_stat_op_calc( "alternative(frame)" should be checked. check_dates : bool, default false Whether opname should be tested on a Datetime Series - check_less_precise : bool, default False - Whether results should only be compared approximately; - passed on to tm.assert_series_equal + rtol : float, default 1e-5 + Relative tolerance. + atol : float, default 1e-8 + Absolute tolerance. 
skipna_alternative : function, default None NaN-safe version of alternative """ @@ -87,14 +89,16 @@ def wrapper(x): result0, frame.apply(wrapper), check_dtype=check_dtype, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, ) # HACK: win32 tm.assert_series_equal( result1, frame.apply(wrapper, axis=1), check_dtype=False, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, ) else: skipna_wrapper = alternative @@ -105,13 +109,14 @@ def wrapper(x): result0, frame.apply(skipna_wrapper), check_dtype=check_dtype, - check_less_precise=check_less_precise, + rtol=rtol, + atol=atol, ) if opname in ["sum", "prod"]: expected = frame.apply(skipna_wrapper, axis=1) tm.assert_series_equal( - result1, expected, check_dtype=False, check_less_precise=check_less_precise + result1, expected, check_dtype=False, rtol=rtol, atol=atol, ) # check dtypes @@ -339,7 +344,7 @@ def kurt(x): np.sum, mixed_float_frame.astype("float32"), check_dtype=False, - check_less_precise=True, + rtol=1e-3, ) assert_stat_op_calc( diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 9303a084f1e71..6f19ec40c2520 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -649,7 +649,7 @@ def test_nlargest_mi_grouper(): ] expected = Series(exp_values, index=exp_idx) - tm.assert_series_equal(result, expected, check_exact=False) + tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3) def test_nsmallest(): diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 7dc73d5be1538..7b6acf7eed685 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -1081,9 +1081,7 @@ def test_decode_array_with_big_int(self): @pytest.mark.parametrize("sign", [-1, 1]) def test_decode_floating_point(self, sign, float_number): float_number *= sign - tm.assert_almost_equal( - float_number, ujson.loads(str(float_number)), check_less_precise=15 - ) 
+ tm.assert_almost_equal(float_number, ujson.loads(str(float_number)), rtol=1e-15) def test_encode_big_set(self): s = set() diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 70713768c8d1e..a07e7a74b7573 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2389,7 +2389,7 @@ def test_write_row_by_row(self): result = sql.read_sql("select * from test", con=self.conn) result.index = frame.index - tm.assert_frame_equal(result, frame, check_less_precise=True) + tm.assert_frame_equal(result, frame, rtol=1e-3) def test_execute(self): frame = tm.makeTimeDataFrame() @@ -2649,7 +2649,7 @@ def test_write_row_by_row(self): result = sql.read_sql("select * from test", con=self.conn) result.index = frame.index - tm.assert_frame_equal(result, frame, check_less_precise=True) + tm.assert_frame_equal(result, frame, rtol=1e-3) # GH#32571 result comes back rounded to 6 digits in some builds; # no obvious pattern diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index e54f4784e9c4f..df2c9ecbd7a0a 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -201,19 +201,19 @@ def test_conversion(self): assert rs[1] == xp def test_conversion_float(self): - decimals = 9 + rtol = 0.5 * 10 ** -9 rs = self.dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None) xp = converter.dates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC")) - tm.assert_almost_equal(rs, xp, decimals) + tm.assert_almost_equal(rs, xp, rtol=rtol) rs = self.dtc.convert( Timestamp("2012-1-1 09:02:03", tz="Asia/Hong_Kong"), None, None ) - tm.assert_almost_equal(rs, xp, decimals) + tm.assert_almost_equal(rs, xp, rtol=rtol) rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None) - tm.assert_almost_equal(rs, xp, decimals) + tm.assert_almost_equal(rs, xp, rtol=rtol) def test_conversion_outofbounds_datetime(self): # 2579 @@ -249,13 +249,13 @@ def test_time_formatter(self, 
time, format_expected): assert result == format_expected def test_dateindex_conversion(self): - decimals = 9 + rtol = 10 ** -9 for freq in ("B", "L", "S"): dateindex = tm.makeDateIndex(k=10, freq=freq) rs = self.dtc.convert(dateindex, None, None) xp = converter.dates.date2num(dateindex._mpl_repr()) - tm.assert_almost_equal(rs, xp, decimals) + tm.assert_almost_equal(rs, xp, rtol=rtol) def test_resolution(self): def _assert_less(ts1, ts2): diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 44a8452964f5a..a080bf0feaebc 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1472,7 +1472,7 @@ def test_group_var_generic_2d_some_nan(self): expected_counts = counts + 2 self.algo(out, counts, values, labels) - tm.assert_almost_equal(out, expected_out, check_less_precise=6) + tm.assert_almost_equal(out, expected_out, rtol=0.5e-06) tm.assert_numpy_array_equal(counts, expected_counts) def test_group_var_constant(self): @@ -1510,7 +1510,7 @@ def test_group_var_large_inputs(self): self.algo(out, counts, values, labels) assert counts[0] == 10 ** 6 - tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True) + tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3) class TestGroupVarFloat32(GroupVarTestMixin): diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index cac6a59527a6e..0d60e6e8a978f 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -782,27 +782,27 @@ def setup_method(self, method): def test_nanvar_all_finite(self): samples = self.samples actual_variance = nanops.nanvar(samples) - tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2) + tm.assert_almost_equal(actual_variance, self.variance, rtol=1e-2) def test_nanvar_nans(self): samples = np.nan * np.ones(2 * self.samples.shape[0]) samples[::2] = self.samples actual_variance = nanops.nanvar(samples, skipna=True) - tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2) + 
tm.assert_almost_equal(actual_variance, self.variance, rtol=1e-2) actual_variance = nanops.nanvar(samples, skipna=False) - tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2) + tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2) def test_nanstd_nans(self): samples = np.nan * np.ones(2 * self.samples.shape[0]) samples[::2] = self.samples actual_std = nanops.nanstd(samples, skipna=True) - tm.assert_almost_equal(actual_std, self.variance ** 0.5, check_less_precise=2) + tm.assert_almost_equal(actual_std, self.variance ** 0.5, rtol=1e-2) actual_std = nanops.nanvar(samples, skipna=False) - tm.assert_almost_equal(actual_std, np.nan, check_less_precise=2) + tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2) def test_nanvar_axis(self): # Generate some sample data. @@ -812,7 +812,7 @@ def test_nanvar_axis(self): actual_variance = nanops.nanvar(samples, axis=1) tm.assert_almost_equal( - actual_variance, np.array([self.variance, 1.0 / 12]), check_less_precise=2 + actual_variance, np.array([self.variance, 1.0 / 12]), rtol=1e-2 ) def test_nanvar_ddof(self): @@ -826,15 +826,13 @@ def test_nanvar_ddof(self): # The unbiased estimate. var = 1.0 / 12 - tm.assert_almost_equal(variance_1, var, check_less_precise=2) + tm.assert_almost_equal(variance_1, var, rtol=1e-2) # The underestimated variance. - tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, check_less_precise=2) + tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2) # The overestimated variance. - tm.assert_almost_equal( - variance_2, (n - 1.0) / (n - 2.0) * var, check_less_precise=2 - ) + tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2) def test_ground_truth(self): # Test against values that were precomputed with Numpy. 
diff --git a/pandas/tests/util/conftest.py b/pandas/tests/util/conftest.py index 5eff49ab774b5..b68bcc93431d0 100644 --- a/pandas/tests/util/conftest.py +++ b/pandas/tests/util/conftest.py @@ -16,8 +16,8 @@ def check_index_type(request): return request.param -@pytest.fixture(params=[True, False]) -def check_less_precise(request): +@pytest.fixture(params=[0.5e-3, 0.5e-5]) +def rtol(request): return request.param diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py index b8048891e4876..c25668c33bfc4 100644 --- a/pandas/tests/util/test_assert_almost_equal.py +++ b/pandas/tests/util/test_assert_almost_equal.py @@ -17,7 +17,7 @@ def _assert_almost_equal_both(a, b, **kwargs): The first object to compare. b : object The second object to compare. - kwargs : dict + **kwargs The arguments passed to `tm.assert_almost_equal`. """ tm.assert_almost_equal(a, b, **kwargs) @@ -34,7 +34,7 @@ def _assert_not_almost_equal(a, b, **kwargs): The first object to compare. b : object The second object to compare. - kwargs : dict + **kwargs The arguments passed to `tm.assert_almost_equal`. """ try: @@ -57,13 +57,23 @@ def _assert_not_almost_equal_both(a, b, **kwargs): The first object to compare. b : object The second object to compare. - kwargs : dict + **kwargs The arguments passed to `tm.assert_almost_equal`. 
""" _assert_not_almost_equal(a, b, **kwargs) _assert_not_almost_equal(b, a, **kwargs) +@pytest.mark.parametrize( + "a,b,check_less_precise", + [(1.1, 1.1, False), (1.1, 1.100001, True), (1.1, 1.1001, 2)], +) +def test_assert_almost_equal_deprecated(a, b, check_less_precise): + # GH#30562 + with tm.assert_produces_warning(FutureWarning): + _assert_almost_equal_both(a, b, check_less_precise=check_less_precise) + + @pytest.mark.parametrize( "a,b", [ @@ -78,12 +88,65 @@ def test_assert_almost_equal_numbers(a, b): _assert_almost_equal_both(a, b) -@pytest.mark.parametrize("a,b", [(1.1, 1), (1.1, True), (1, 2), (1.0001, np.int16(1))]) +@pytest.mark.parametrize( + "a,b", + [ + (1.1, 1), + (1.1, True), + (1, 2), + (1.0001, np.int16(1)), + # The following two examples are not "almost equal" due to tol. + (0.1, 0.1001), + (0.0011, 0.0012), + ], +) def test_assert_not_almost_equal_numbers(a, b): _assert_not_almost_equal_both(a, b) -@pytest.mark.parametrize("a,b", [(0, 0), (0, 0.0), (0, np.float64(0)), (0.000001, 0)]) +@pytest.mark.parametrize( + "a,b", + [ + (1.1, 1.1), + (1.1, 1.100001), + (1.1, 1.1001), + (0.000001, 0.000005), + (1000.0, 1000.0005), + # Testing this example, as per #13357 + (0.000011, 0.000012), + ], +) +def test_assert_almost_equal_numbers_atol(a, b): + # Equivalent to the deprecated check_less_precise=True + _assert_almost_equal_both(a, b, rtol=0.5e-3, atol=0.5e-3) + + +@pytest.mark.parametrize("a,b", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)]) +def test_assert_not_almost_equal_numbers_atol(a, b): + _assert_not_almost_equal_both(a, b, atol=1e-3) + + +@pytest.mark.parametrize( + "a,b", + [ + (1.1, 1.1), + (1.1, 1.100001), + (1.1, 1.1001), + (1000.0, 1000.0005), + (1.1, 1.11), + (0.1, 0.101), + ], +) +def test_assert_almost_equal_numbers_rtol(a, b): + _assert_almost_equal_both(a, b, rtol=0.05) + + +@pytest.mark.parametrize("a,b", [(0.000011, 0.000012), (0.000001, 0.000005)]) +def test_assert_not_almost_equal_numbers_rtol(a, b): + 
_assert_not_almost_equal_both(a, b, rtol=0.05) + + +@pytest.mark.parametrize("a,b", [(0, 0), (0, 0.0), (0, np.float64(0)), (0.00000001, 0)]) def test_assert_almost_equal_numbers_with_zeros(a, b): _assert_almost_equal_both(a, b) @@ -235,7 +298,7 @@ def test_assert_almost_equal_object(): def test_assert_almost_equal_value_mismatch(): - msg = "expected 2\\.00000 but got 1\\.00000, with decimal 5" + msg = "expected 2\\.00000 but got 1\\.00000, with rtol=1e-05, atol=1e-08" with pytest.raises(AssertionError, match=msg): tm.assert_almost_equal(1, 2) diff --git a/pandas/tests/util/test_assert_extension_array_equal.py b/pandas/tests/util/test_assert_extension_array_equal.py index 0547323b882f6..d9fdf1491c328 100644 --- a/pandas/tests/util/test_assert_extension_array_equal.py +++ b/pandas/tests/util/test_assert_extension_array_equal.py @@ -32,16 +32,13 @@ def test_assert_extension_array_equal_not_exact(kwargs): tm.assert_extension_array_equal(arr1, arr2, **kwargs) -@pytest.mark.parametrize( - "check_less_precise", [True, False, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] -) -def test_assert_extension_array_equal_less_precise(check_less_precise): +@pytest.mark.parametrize("decimals", range(10)) +def test_assert_extension_array_equal_less_precise(decimals): + rtol = 0.5 * 10 ** -decimals arr1 = SparseArray([0.5, 0.123456]) arr2 = SparseArray([0.5, 0.123457]) - kwargs = dict(check_less_precise=check_less_precise) - - if check_less_precise is False or check_less_precise >= 5: + if decimals >= 5: msg = """\ ExtensionArray are different @@ -50,9 +47,9 @@ def test_assert_extension_array_equal_less_precise(check_less_precise): \\[right\\]: \\[0\\.5, 0\\.123457\\]""" with pytest.raises(AssertionError, match=msg): - tm.assert_extension_array_equal(arr1, arr2, **kwargs) + tm.assert_extension_array_equal(arr1, arr2, rtol=rtol) else: - tm.assert_extension_array_equal(arr1, arr2, **kwargs) + tm.assert_extension_array_equal(arr1, arr2, rtol=rtol) def 
test_assert_extension_array_equal_dtype_mismatch(check_dtype): diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py index bbbeebcec2569..125af6ef78593 100644 --- a/pandas/tests/util/test_assert_index_equal.py +++ b/pandas/tests/util/test_assert_index_equal.py @@ -82,12 +82,12 @@ def test_index_equal_values_close(check_exact): tm.assert_index_equal(idx1, idx2, check_exact=check_exact) -def test_index_equal_values_less_close(check_exact, check_less_precise): +def test_index_equal_values_less_close(check_exact, rtol): idx1 = Index([1, 2, 3.0]) idx2 = Index([1, 2, 3.0001]) - kwargs = dict(check_exact=check_exact, check_less_precise=check_less_precise) + kwargs = dict(check_exact=check_exact, rtol=rtol) - if check_exact or not check_less_precise: + if check_exact or rtol < 0.5e-3: msg = """Index are different Index values are different \\(33\\.33333 %\\) @@ -100,10 +100,10 @@ def test_index_equal_values_less_close(check_exact, check_less_precise): tm.assert_index_equal(idx1, idx2, **kwargs) -def test_index_equal_values_too_far(check_exact, check_less_precise): +def test_index_equal_values_too_far(check_exact, rtol): idx1 = Index([1, 2, 3]) idx2 = Index([1, 2, 4]) - kwargs = dict(check_exact=check_exact, check_less_precise=check_less_precise) + kwargs = dict(check_exact=check_exact, rtol=rtol) msg = """Index are different @@ -115,10 +115,10 @@ def test_index_equal_values_too_far(check_exact, check_less_precise): tm.assert_index_equal(idx1, idx2, **kwargs) -def test_index_equal_level_values_mismatch(check_exact, check_less_precise): +def test_index_equal_level_values_mismatch(check_exact, rtol): idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)]) idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)]) - kwargs = dict(check_exact=check_exact, check_less_precise=check_less_precise) + kwargs = dict(check_exact=check_exact, rtol=rtol) msg = """MultiIndex level \\[1\\] are different diff 
--git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 337a06b91e443..859c8474562a3 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -102,22 +102,20 @@ def test_series_not_equal_metadata_mismatch(kwargs): @pytest.mark.parametrize("data1,data2", [(0.12345, 0.12346), (0.1235, 0.1236)]) @pytest.mark.parametrize("dtype", ["float32", "float64"]) -@pytest.mark.parametrize("check_less_precise", [False, True, 0, 1, 2, 3, 10]) -def test_less_precise(data1, data2, dtype, check_less_precise): +@pytest.mark.parametrize("decimals", [0, 1, 2, 3, 5, 10]) +def test_less_precise(data1, data2, dtype, decimals): + rtol = 10 ** -decimals s1 = Series([data1], dtype=dtype) s2 = Series([data2], dtype=dtype) - kwargs = dict(check_less_precise=check_less_precise) - - if (check_less_precise is False or check_less_precise == 10) or ( - (check_less_precise is True or check_less_precise >= 3) - and abs(data1 - data2) >= 0.0001 + if (decimals == 5 or decimals == 10) or ( + decimals >= 3 and abs(data1 - data2) >= 0.0005 ): msg = "Series values are different" with pytest.raises(AssertionError, match=msg): - tm.assert_series_equal(s1, s2, **kwargs) + tm.assert_series_equal(s1, s2, rtol=rtol) else: - _assert_series_equal_both(s1, s2, **kwargs) + _assert_series_equal_both(s1, s2, rtol=rtol) @pytest.mark.parametrize( @@ -151,7 +149,7 @@ def test_series_equal_index_dtype(s1, s2, msg, check_index_type): tm.assert_series_equal(s1, s2, **kwargs) -def test_series_equal_length_mismatch(check_less_precise): +def test_series_equal_length_mismatch(rtol): msg = """Series are different Series length are different @@ -162,10 +160,10 @@ def test_series_equal_length_mismatch(check_less_precise): s2 = Series([1, 2, 3, 4]) with pytest.raises(AssertionError, match=msg): - tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise) + tm.assert_series_equal(s1, s2, rtol=rtol) -def 
test_series_equal_numeric_values_mismatch(check_less_precise): +def test_series_equal_numeric_values_mismatch(rtol): msg = """Series are different Series values are different \\(33\\.33333 %\\) @@ -177,10 +175,10 @@ def test_series_equal_numeric_values_mismatch(check_less_precise): s2 = Series([1, 2, 4]) with pytest.raises(AssertionError, match=msg): - tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise) + tm.assert_series_equal(s1, s2, rtol=rtol) -def test_series_equal_categorical_values_mismatch(check_less_precise): +def test_series_equal_categorical_values_mismatch(rtol): msg = """Series are different Series values are different \\(66\\.66667 %\\) @@ -194,10 +192,10 @@ def test_series_equal_categorical_values_mismatch(check_less_precise): s2 = Series(Categorical(["a", "c", "b"])) with pytest.raises(AssertionError, match=msg): - tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise) + tm.assert_series_equal(s1, s2, rtol=rtol) -def test_series_equal_datetime_values_mismatch(check_less_precise): +def test_series_equal_datetime_values_mismatch(rtol): msg = """numpy array are different numpy array values are different \\(100.0 %\\) @@ -209,7 +207,7 @@ def test_series_equal_datetime_values_mismatch(check_less_precise): s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D")) with pytest.raises(AssertionError, match=msg): - tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise) + tm.assert_series_equal(s1, s2, rtol=rtol) def test_series_equal_categorical_mismatch(check_categorical):
closes #9457 Greetings! This is my first pull-request on an open source project, so I hope I did not miss anything at least too obvious... Thank you, in advance :smile: I do have a few questions: * Should I add the `versionadded` directive in all docstrings that were changed? * Which "whatsnew" release notes should I add this change to? v1.0.0? * Should #9457 also be closed, as from my understanding this was also the underlying issue? * Should I be the one ticking the checkboxes below? This fixes the issue where: # Fails because it's doing (1 - .1 / .1001) assert_almost_equal(0.1, 0.1001, check_less_precise=True) # Works as intuitively expected assert_almost_equal( 0.1, 0.1001, atol=0.01, ) Commit message below: ------- This commit makes `assert_almost_equal` accept both relative and absolute precision when comparing numbers, through two new keyword arguments: `rtol`, and `atol`, respectively. Under the hood, `_libs.testing.assert_almost_equal` is now calling `math.isclose`, instead of an adaptaion of [numpy.testing.assert_almost_equal](https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.testing.assert_almost_equal.html). - [x] closes #13357 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30562
2019-12-30T19:23:21Z
2020-06-24T22:25:21Z
2020-06-24T22:25:20Z
2022-11-12T11:40:12Z
BLD: Fix IntervalTree build warnings
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index 333c05f7c0dc5..316c9e5b7e5f0 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -6,12 +6,20 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in from pandas._libs.algos import is_monotonic -ctypedef fused scalar_t: - float64_t - float32_t +ctypedef fused int_scalar_t: int64_t int32_t + float64_t + float32_t + +ctypedef fused uint_scalar_t: uint64_t + float64_t + float32_t + +ctypedef fused scalar_t: + int_scalar_t + uint_scalar_t # ---------------------------------------------------------------------- # IntervalTree @@ -128,7 +136,12 @@ cdef class IntervalTree(IntervalMixin): result = Int64Vector() old_len = 0 for i in range(len(target)): - self.root.query(result, target[i]) + try: + self.root.query(result, target[i]) + except OverflowError: + # overflow -> no match, which is already handled below + pass + if result.data.n == old_len: result.append(-1) elif result.data.n > old_len + 1: @@ -150,7 +163,12 @@ cdef class IntervalTree(IntervalMixin): missing = Int64Vector() old_len = 0 for i in range(len(target)): - self.root.query(result, target[i]) + try: + self.root.query(result, target[i]) + except OverflowError: + # overflow -> no match, which is already handled below + pass + if result.data.n == old_len: result.append(-1) missing.append(i) @@ -202,19 +220,26 @@ for dtype in ['float32', 'float64', 'int32', 'int64', 'uint64']: ('neither', '<', '<')]: cmp_left_converse = '<' if cmp_left == '<=' else '<=' cmp_right_converse = '<' if cmp_right == '<=' else '<=' + if dtype.startswith('int'): + fused_prefix = 'int_' + elif dtype.startswith('uint'): + fused_prefix = 'uint_' + elif dtype.startswith('float'): + fused_prefix = '' nodes.append((dtype, dtype.title(), closed, closed.title(), cmp_left, cmp_right, cmp_left_converse, - cmp_right_converse)) + cmp_right_converse, + fused_prefix)) }} NODE_CLASSES = {} {{for dtype, 
dtype_title, closed, closed_title, cmp_left, cmp_right, - cmp_left_converse, cmp_right_converse in nodes}} + cmp_left_converse, cmp_right_converse, fused_prefix in nodes}} cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode: """Non-terminal node for an IntervalTree @@ -317,7 +342,7 @@ cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode: @cython.wraparound(False) @cython.boundscheck(False) @cython.initializedcheck(False) - cpdef query(self, Int64Vector result, scalar_t point): + cpdef query(self, Int64Vector result, {{fused_prefix}}scalar_t point): """Recursively query this node and its sub-nodes for intervals that overlap with the query point. """ diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index f2fca34e083c2..695a98777eadb 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -63,6 +63,17 @@ def test_get_indexer(self, tree): ): tree.get_indexer(np.array([3.0])) + @pytest.mark.parametrize( + "dtype, target_value", [("int64", 2 ** 63 + 1), ("uint64", -1)] + ) + def test_get_indexer_overflow(self, dtype, target_value): + left, right = np.array([0, 1], dtype=dtype), np.array([1, 2], dtype=dtype) + tree = IntervalTree(left, right) + + result = tree.get_indexer(np.array([target_value])) + expected = np.array([-1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + def test_get_indexer_non_unique(self, tree): indexer, missing = tree.get_indexer_non_unique(np.array([1.0, 2.0, 6.5])) @@ -82,6 +93,21 @@ def test_get_indexer_non_unique(self, tree): expected = np.array([2], dtype="intp") tm.assert_numpy_array_equal(result, expected) + @pytest.mark.parametrize( + "dtype, target_value", [("int64", 2 ** 63 + 1), ("uint64", -1)] + ) + def test_get_indexer_non_unique_overflow(self, dtype, target_value): + left, right = np.array([0, 2], dtype=dtype), np.array([1, 3], dtype=dtype) + tree = 
IntervalTree(left, right) + target = np.array([target_value]) + + result_indexer, result_missing = tree.get_indexer_non_unique(target) + expected_indexer = np.array([-1], dtype="intp") + tm.assert_numpy_array_equal(result_indexer, expected_indexer) + + expected_missing = np.array([0], dtype="intp") + tm.assert_numpy_array_equal(result_missing, expected_missing) + def test_duplicates(self, dtype): left = np.array([0, 0, 0], dtype=dtype) tree = IntervalTree(left, left + 1)
- [X] closes #27169 - [X] closes #30365 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` xref #30366 cc @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/30560
2019-12-30T17:28:58Z
2020-01-01T02:45:56Z
2020-01-01T02:45:55Z
2020-01-01T03:32:42Z
[TST] Test DataFrame.append with other dtypes
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index fac6a9139462f..1d6935795b0e4 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -177,3 +177,19 @@ def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): result = df.append(df.iloc[0]).iloc[-1] expected = pd.Series(pd.Timestamp(timestamp, tz=tz), name=0) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data, dtype", + [ + ([1], pd.Int64Dtype()), + ([1], pd.CategoricalDtype()), + ([pd.Interval(left=0, right=5)], pd.IntervalDtype()), + ([pd.Period("2000-03", freq="M")], pd.PeriodDtype("M")), + ([1], pd.SparseDtype()), + ], + ) + def test_other_dtypes(self, data, dtype): + df = pd.DataFrame(data, dtype=dtype) + result = df.append(df.iloc[0]).iloc[-1] + expected = pd.Series(data, name=0, dtype=dtype) + tm.assert_series_equal(result, expected)
- [x] closes #30445 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30558
2019-12-30T13:11:54Z
2019-12-30T17:49:41Z
2019-12-30T17:49:41Z
2019-12-30T17:54:27Z
DOC: Document behaviour of head(n), tail(n) for negative values of n except on GroupBy
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 08c7f38ce4c82..e4999ea3cc576 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4652,6 +4652,9 @@ def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: on position. It is useful for quickly testing if your object has the right type of data in it. + For negative values of `n`, this function returns all rows except + the last `n` rows, equivalent to ``df[:-n]``. + Parameters ---------- n : int, default 5 @@ -4659,7 +4662,7 @@ def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: Returns ------- - obj_head : same type as caller + same type as caller The first `n` rows of the caller object. See Also @@ -4699,6 +4702,17 @@ def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: 0 alligator 1 bee 2 falcon + + For negative values of `n` + + >>> df.head(-3) + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + 5 parrot """ return self.iloc[:n] @@ -4711,6 +4725,9 @@ def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: position. It is useful for quickly verifying data, for example, after sorting or appending rows. + For negative values of `n`, this function returns all rows except + the first `n` rows, equivalent to ``df[n:]``. + Parameters ---------- n : int, default 5 @@ -4758,6 +4775,17 @@ def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: 6 shark 7 whale 8 zebra + + For negative values of `n` + + >>> df.tail(-3) + animal + 3 lion + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra """ if n == 0: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 227547daf3668..6c7c3c1a57d6f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2377,6 +2377,8 @@ def head(self, n=5): from the original DataFrame with original index and order preserved (``as_index`` flag is ignored). + Does not work for negative values of `n`. 
+ Returns ------- Series or DataFrame @@ -2390,6 +2392,10 @@ def head(self, n=5): A B 0 1 2 2 5 6 + >>> df.groupby('A').head(-1) + Empty DataFrame + Columns: [A, B] + Index: [] """ self._reset_group_selection() mask = self._cumcount_array() < n @@ -2405,6 +2411,8 @@ def tail(self, n=5): from the original DataFrame with original index and order preserved (``as_index`` flag is ignored). + Does not work for negative values of `n`. + Returns ------- Series or DataFrame @@ -2418,6 +2426,10 @@ def tail(self, n=5): A B 1 a 2 3 b 2 + >>> df.groupby('A').tail(-1) + Empty DataFrame + Columns: [A, B] + Index: [] """ self._reset_group_selection() mask = self._cumcount_array(ascending=False) < n
- [x] xref #30192 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Output of `python3 scripts/validate_docstrings.py pandas.DataFrame.head` ```bash ################################################################################ ###################### Docstring (pandas.DataFrame.head) ###################### ################################################################################ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `n` rows, equivalent to ``df[:-n]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.head" correct. 
:) ``` Output of `python3 scripts/validate_docstrings.py pandas.DataFrame.tail` ```bash ################################################################################ ###################### Docstring (pandas.DataFrame.tail) ###################### ################################################################################ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.tail" correct. :) ``` Output of `./ci/code_checks.sh doctests` passed for `groupby.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/30556
2019-12-30T10:55:14Z
2019-12-30T22:13:40Z
2019-12-30T22:13:40Z
2019-12-30T22:13:49Z
TST: Regression testing for fixed issues
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8f88f68c69f2b..795f76d94cc25 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -588,6 +588,20 @@ def test_groupby_multiple_columns(df, op): tm.assert_series_equal(result, expected) +def test_as_index_select_column(): + # GH 5764 + df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) + result = df.groupby("A", as_index=False)["B"].get_group(1) + expected = pd.Series([2, 4], name="B") + tm.assert_series_equal(result, expected) + + result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum()) + expected = pd.Series( + [2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)]) + ) + tm.assert_series_equal(result, expected) + + def test_groupby_as_index_agg(df): grouped = df.groupby("A", as_index=False) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index f8546ea636959..2a82b39b646c0 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -1133,3 +1133,40 @@ def func(grp): expected = pd.DataFrame([2, -2, 2, 4], columns=["B"]) tm.assert_frame_equal(result, expected) + + +def test_transform_lambda_indexing(): + # GH 7883 + df = pd.DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"], + "B": ["one", "one", "two", "three", "two", "six", "five", "three"], + "C": range(8), + "D": range(8), + "E": range(8), + } + ) + df = df.set_index(["A", "B"]) + df = df.sort_index() + result = df.groupby(level="A").transform(lambda x: x.iloc[-1]) + expected = DataFrame( + { + "C": [3, 3, 7, 7, 4, 4, 4, 4], + "D": [3, 3, 7, 7, 4, 4, 4, 4], + "E": [3, 3, 7, 7, 4, 4, 4, 4], + }, + index=MultiIndex.from_tuples( + [ + ("bar", "one"), + ("bar", "three"), + ("flux", "six"), + ("flux", "three"), + ("foo", "five"), + ("foo", "one"), + ("foo", "two"), + ("foo", "two"), + ], + names=["A", "B"], + ), + ) + 
tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 37976d89ecba4..7187733fc91c3 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -736,6 +736,12 @@ def test_get_indexer(self): expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) + def test_get_indexer_nan(self): + # GH 7820 + result = Index([1, 2, np.nan]).get_indexer([np.nan]) + expected = np.array([2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + def test_intersection(self): index = self.create_index() other = Index([1, 2, 3, 4, 5]) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 8b3620e8cd843..9119ca0a4511b 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -966,3 +966,17 @@ def test_loc_getitem_label_list_integer_labels( expected = df.iloc[:, expected_columns] result = df.loc[["A", "B", "C"], column_key] tm.assert_frame_equal(result, expected, check_column_type=check_column_type) + + +def test_loc_setitem_float_intindex(): + # GH 8720 + rand_data = np.random.randn(8, 4) + result = pd.DataFrame(rand_data) + result.loc[:, 0.5] = np.nan + expected_data = np.hstack((rand_data, np.array([np.nan] * 8).reshape(8, 1))) + expected = pd.DataFrame(expected_data, columns=[0.0, 1.0, 2.0, 3.0, 0.5]) + tm.assert_frame_equal(result, expected) + + result = pd.DataFrame(rand_data) + result.loc[:, 0.5] = np.nan + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 24233a0ec84b1..f86d5480ddafa 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -1,3 +1,4 @@ +import io import os import sys @@ -563,3 +564,17 @@ def test_to_csv_na_rep_long_string(self, df_new_type): result = df.to_csv(index=False, na_rep="mynull", 
encoding="ascii") assert expected == result + + def test_to_csv_timedelta_precision(self): + # GH 6783 + s = pd.Series([1, 1]).astype("timedelta64[ns]") + buf = io.StringIO() + s.to_csv(buf) + result = buf.getvalue() + expected_rows = [ + ",0", + "0,0 days 00:00:00.000000001", + "1,0 days 00:00:00.000000001", + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 42a4a55988b0f..007a068125d43 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -2207,3 +2207,13 @@ def test_first_row_bom(all_parsers): result = parser.read_csv(StringIO(data), delimiter="\t") expected = DataFrame(columns=["Head1", "Head2", "Head3"]) tm.assert_frame_equal(result, expected) + + +def test_integer_precision(all_parsers): + # Gh 7072 + s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765 +5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389""" + parser = all_parsers + result = parser.read_csv(StringIO(s), header=None)[4] + expected = Series([4321583677327450765, 4321113141090630389], name=4) + tm.assert_series_equal(result, expected)
- [x] closes #5764 - [x] closes #7883 - [x] closes #7820 - [x] closes #8720 - [x] closes #6783 - [x] closes #7072 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/30554
2019-12-30T07:00:09Z
2019-12-31T15:04:45Z
2019-12-31T15:04:44Z
2020-01-01T01:43:01Z
TST/BUG: fix incorrectly-passing Exception in test_html
diff --git a/pandas/io/html.py b/pandas/io/html.py index 809ce77eef0bb..75cb0fafaa6b3 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -591,9 +591,14 @@ def _setup_build_doc(self): def _build_doc(self): from bs4 import BeautifulSoup - return BeautifulSoup( - self._setup_build_doc(), features="html5lib", from_encoding=self.encoding - ) + bdoc = self._setup_build_doc() + if isinstance(bdoc, bytes) and self.encoding is not None: + udoc = bdoc.decode(self.encoding) + from_encoding = None + else: + udoc = bdoc + from_encoding = self.encoding + return BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding) def _build_xpath_expr(attrs) -> str: diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 626df839363cb..7a814ce82fd73 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1158,9 +1158,9 @@ def test_displayed_only(self, displayed_only, exp0, exp1): assert len(dfs) == 1 # Should not parse hidden table def test_encode(self, html_encoding_file): - _, encoding = os.path.splitext(os.path.basename(html_encoding_file))[0].split( - "_" - ) + base_path = os.path.basename(html_encoding_file) + root = os.path.splitext(base_path)[0] + _, encoding = root.split("_") try: with open(html_encoding_file, "rb") as fobj: @@ -1183,7 +1183,7 @@ def test_encode(self, html_encoding_file): if is_platform_windows(): if "16" in encoding or "32" in encoding: pytest.skip() - raise + raise def test_parse_failure_unseekable(self): # Issue #17975
A `raise` is indented one indent further than it should be. Fixing this surfaces a failing test that is fixed by decoding bytes before passing it to bs4.
https://api.github.com/repos/pandas-dev/pandas/pulls/30553
2019-12-30T03:23:00Z
2020-01-18T17:20:04Z
2020-01-18T17:20:04Z
2020-01-18T17:24:11Z
TYP: check_untyped_defs core.computation.eval
diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index dbfd6c04eee32..9c5388faae1bd 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -3,6 +3,7 @@ """ import abc +from typing import Dict, Type from pandas.core.computation.align import align_terms, reconstruct_object from pandas.core.computation.ops import _mathops, _reductions @@ -53,7 +54,7 @@ def convert(self) -> str: """ return printing.pprint_thing(self.expr) - def evaluate(self): + def evaluate(self) -> object: """ Run the engine on the expression. @@ -62,7 +63,7 @@ def evaluate(self): Returns ------- - obj : object + object The result of the passed expression. """ if not self._is_aligned: @@ -101,12 +102,6 @@ class NumExprEngine(AbstractEngine): has_neg_frac = True - def __init__(self, expr): - super().__init__(expr) - - def convert(self) -> str: - return str(super().convert()) - def _evaluate(self): import numexpr as ne @@ -128,14 +123,14 @@ class PythonEngine(AbstractEngine): has_neg_frac = False - def __init__(self, expr): - super().__init__(expr) - def evaluate(self): return self.expr() - def _evaluate(self): + def _evaluate(self) -> None: pass -_engines = {"numexpr": NumExprEngine, "python": PythonEngine} +_engines: Dict[str, Type[AbstractEngine]] = { + "numexpr": NumExprEngine, + "python": PythonEngine, +} diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 2e5a563b815b3..7599a82ddffed 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -5,6 +5,7 @@ """ import tokenize +from typing import Optional import warnings from pandas._libs.lib import _no_default @@ -17,7 +18,7 @@ from pandas.io.formats.printing import pprint_thing -def _check_engine(engine): +def _check_engine(engine: Optional[str]) -> str: """ Make sure a valid engine is passed. 
@@ -168,7 +169,7 @@ def _check_for_locals(expr: str, stack_level: int, parser: str): def eval( expr, parser="pandas", - engine=None, + engine: Optional[str] = None, truediv=_no_default, local_dict=None, global_dict=None, diff --git a/setup.cfg b/setup.cfg index 8fb602188dad5..0c93ca878d6b4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -172,9 +172,6 @@ check_untyped_defs=False [mypy-pandas.core.computation.align] check_untyped_defs=False -[mypy-pandas.core.computation.eval] -check_untyped_defs=False - [mypy-pandas.core.computation.expr] check_untyped_defs=False
pandas\core\computation\eval.py:334: error: Cannot instantiate abstract class 'AbstractEngine' with abstract attribute '_evaluate'
https://api.github.com/repos/pandas-dev/pandas/pulls/30551
2019-12-29T22:08:29Z
2019-12-30T13:28:32Z
2019-12-30T13:28:32Z
2019-12-30T13:55:41Z
TYP: check_untyped_defs pandas.core.computation.align
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 57348ad3b81a0..a1b1cffdd1d76 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -2,10 +2,12 @@ """ from functools import partial, wraps +from typing import Dict, Optional, Sequence, Tuple, Type, Union import warnings import numpy as np +from pandas._typing import FrameOrSeries from pandas.errors import PerformanceWarning from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries @@ -15,22 +17,27 @@ from pandas.core.computation.common import result_type_many -def _align_core_single_unary_op(term): +def _align_core_single_unary_op( + term, +) -> Tuple[Union[partial, Type[FrameOrSeries]], Optional[Dict[str, int]]]: + + typ: Union[partial, Type[FrameOrSeries]] + axes: Optional[Dict[str, int]] = None + if isinstance(term.value, np.ndarray): typ = partial(np.asanyarray, dtype=term.value.dtype) else: typ = type(term.value) - ret = (typ,) + if hasattr(term.value, "axes"): + axes = _zip_axes_from_type(typ, term.value.axes) - if not hasattr(term.value, "axes"): - ret += (None,) - else: - ret += (_zip_axes_from_type(typ, term.value.axes),) - return ret + return typ, axes -def _zip_axes_from_type(typ, new_axes): - axes = {ax_name: new_axes[ax_ind] for ax_ind, ax_name in typ._AXIS_NAMES.items()} +def _zip_axes_from_type( + typ: Type[FrameOrSeries], new_axes: Sequence[int] +) -> Dict[str, int]: + axes = {name: new_axes[i] for i, name in typ._AXIS_NAMES.items()} return axes diff --git a/setup.cfg b/setup.cfg index 84b3f9409b9ba..d4d4dd9e41431 100644 --- a/setup.cfg +++ b/setup.cfg @@ -163,9 +163,6 @@ check_untyped_defs=False [mypy-pandas.core.base] check_untyped_defs=False -[mypy-pandas.core.computation.align] -check_untyped_defs=False - [mypy-pandas.core.computation.expr] check_untyped_defs=False
pandas\core\computation\align.py:22: error: Incompatible types in assignment (expression has type "Type[Any]", variable has type "partial[Any]") pandas\core\computation\align.py:26: error: Incompatible types in assignment (expression has type "Tuple[partial[Any], None]", variable has type "Tuple[partial[Any]]") pandas\core\computation\align.py:28: error: Incompatible types in assignment (expression has type "Tuple[partial[Any], Any]", variable has type "Tuple[partial[Any]]")
https://api.github.com/repos/pandas-dev/pandas/pulls/30550
2019-12-29T21:38:07Z
2019-12-31T12:41:02Z
2019-12-31T12:41:02Z
2019-12-31T14:02:44Z
TYP: check_untyped_defs astype_nansafe
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index a8fcd6d03847c..f0cc3e1c3ae59 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -524,7 +524,7 @@ def astype(self, dtype, copy=True): na_value = np.nan # coerce data = self._coerce_to_ndarray(na_value=na_value) - return astype_nansafe(data, dtype, copy=None) + return astype_nansafe(data, dtype, copy=False) def value_counts(self, dropna=True): """ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 3f5a4ca49702f..ee8b2c3bb723f 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -546,7 +546,7 @@ def astype(self, dtype, copy=True): # coerce data = self._coerce_to_ndarray() - return astype_nansafe(data, dtype, copy=None) + return astype_nansafe(data, dtype, copy=False) @property def _ndarray_values(self) -> np.ndarray: diff --git a/setup.cfg b/setup.cfg index 8fb602188dad5..1ea493fab07ef 100644 --- a/setup.cfg +++ b/setup.cfg @@ -151,15 +151,9 @@ ignore_errors=True [mypy-pandas._version] check_untyped_defs=False -[mypy-pandas.core.arrays.boolean] -check_untyped_defs=False - [mypy-pandas.core.arrays.categorical] check_untyped_defs=False -[mypy-pandas.core.arrays.integer] -check_untyped_defs=False - [mypy-pandas.core.arrays.interval] check_untyped_defs=False
``` pandas\core\arrays\integer.py:549: error: Argument "copy" to "astype_nansafe" has incompatible type "None"; expected "bool" pandas\core\arrays\boolean.py:527: error: Argument "copy" to "astype_nansafe" has incompatible type "None"; expected "bool" ```
https://api.github.com/repos/pandas-dev/pandas/pulls/30548
2019-12-29T20:55:00Z
2019-12-30T13:08:30Z
2019-12-30T13:08:30Z
2019-12-30T13:54:33Z
CLN: Update old .format to f-string
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index a8fcd6d03847c..b030a9a9cf139 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -755,9 +755,8 @@ def logical_method(self, other): if other_is_scalar and not (other is libmissing.NA or lib.is_bool(other)): raise TypeError( - "'other' should be pandas.NA or a bool. Got {} instead.".format( - type(other).__name__ - ) + "'other' should be pandas.NA or a bool. " + f"Got {type(other).__name__} instead." ) if not other_is_scalar and len(self) != len(other): @@ -772,7 +771,7 @@ def logical_method(self, other): return BooleanArray(result, mask) - name = "__{name}__".format(name=op.__name__) + name = f"__{op.__name__}__" return set_function_name(logical_method, name, cls) @classmethod @@ -819,7 +818,7 @@ def cmp_method(self, other): return BooleanArray(result, mask, copy=False) - name = "__{name}__".format(name=op.__name__) + name = f"__{op.__name__}" return set_function_name(cmp_method, name, cls) def _reduce(self, name, skipna=True, **kwargs): @@ -922,7 +921,7 @@ def boolean_arithmetic_method(self, other): return self._maybe_mask_result(result, mask, other, op_name) - name = "__{name}__".format(name=op_name) + name = f"__{op_name}__" return set_function_name(boolean_arithmetic_method, name, cls)
Reviewed and updated to f-string for: pandas/compat/pickle_compat.py pandas/_config/config.py pandas/core/arrays/boolean.py - [x] Contributes to #29547 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Marking pickle_compat.py as done but I left the existing `.format` as is, as I believe this is a good use case for `.format`
https://api.github.com/repos/pandas-dev/pandas/pulls/30547
2019-12-29T20:47:00Z
2019-12-30T08:09:56Z
2019-12-30T08:09:55Z
2019-12-30T08:10:07Z
DEPR: DataFrame GroupBy indexing with single items DeprecationWarning
diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst index 69bb700c97b15..4e284fe7b5968 100644 --- a/doc/source/getting_started/comparison/comparison_with_sas.rst +++ b/doc/source/getting_started/comparison/comparison_with_sas.rst @@ -629,7 +629,7 @@ for more details and examples. .. ipython:: python - tips_summed = tips.groupby(['sex', 'smoker'])['total_bill', 'tip'].sum() + tips_summed = tips.groupby(['sex', 'smoker'])[['total_bill', 'tip']].sum() tips_summed.head() diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst index db687386329bb..fec6bae1e0330 100644 --- a/doc/source/getting_started/comparison/comparison_with_stata.rst +++ b/doc/source/getting_started/comparison/comparison_with_stata.rst @@ -617,7 +617,7 @@ for more details and examples. .. ipython:: python - tips_summed = tips.groupby(['sex', 'smoker'])['total_bill', 'tip'].sum() + tips_summed = tips.groupby(['sex', 'smoker'])[['total_bill', 'tip']].sum() tips_summed.head() diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index e524b8d2fbf8c..2404e60323294 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -584,6 +584,37 @@ Deprecations - :meth:`DataFrame.to_stata`, :meth:`DataFrame.to_feather`, and :meth:`DataFrame.to_parquet` argument "fname" is deprecated, use "path" instead (:issue:`23574`) - The deprecated internal attributes ``_start``, ``_stop`` and ``_step`` of :class:`RangeIndex` now raise a ``FutureWarning`` instead of a ``DeprecationWarning`` (:issue:`26581`) +**Selecting Columns from a Grouped DataFrame** + +When selecting columns from a :class:`DataFrameGroupBy` object, passing individual keys (or a tuple of keys) inside single brackets is deprecated, +a list of items should be used instead. (:issue:`23566`) For example: + +.. 
code-block:: ipython + + df = pd.DataFrame({ + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": np.random.randn(8), + "C": np.random.randn(8), + }) + g = df.groupby('A') + + # single key, returns SeriesGroupBy + g['B'] + + # tuple of single key, returns SeriesGroupBy + g[('B',)] + + # tuple of multiple keys, returns DataFrameGroupBy, raises FutureWarning + g[('B', 'C')] + + # multiple keys passed directly, returns DataFrameGroupBy, raises FutureWarning + # (implicitly converts the passed strings into a single tuple) + g['B', 'C'] + + # proper way, returns DataFrameGroupBy + g[['B', 'C']] + + .. _whatsnew_1000.prior_deprecations: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 27afd8ca018ac..c49677fa27a31 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -25,6 +25,7 @@ Union, cast, ) +import warnings import numpy as np @@ -326,7 +327,7 @@ def _aggregate_multiple_funcs(self, arg): return DataFrame(results, columns=columns) def _wrap_series_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Index, + self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Index ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy operation into the expected result. 
@@ -1578,6 +1579,19 @@ def filter(self, func, dropna=True, *args, **kwargs): return self._apply_filter(indices, dropna) + def __getitem__(self, key): + # per GH 23566 + if isinstance(key, tuple) and len(key) > 1: + # if len == 1, then it becomes a SeriesGroupBy and this is actually + # valid syntax, so don't raise warning + warnings.warn( + "Indexing with multiple keys (implicitly converted to a tuple " + "of keys) will be deprecated, use a list instead.", + FutureWarning, + stacklevel=2, + ) + return super().__getitem__(key) + def _gotitem(self, key, ndim: int, subset=None): """ sub-classes to define diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index f2af397357e4f..04c707acafab2 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -71,14 +71,12 @@ def test_getitem_list_of_columns(self): ) result = df.groupby("A")[["C", "D"]].mean() - result2 = df.groupby("A")["C", "D"].mean() - result3 = df.groupby("A")[df.columns[2:4]].mean() + result2 = df.groupby("A")[df.columns[2:4]].mean() expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean() tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected) def test_getitem_numeric_column_names(self): # GH #13731 @@ -91,14 +89,40 @@ def test_getitem_numeric_column_names(self): } ) result = df.groupby(0)[df.columns[1:3]].mean() - result2 = df.groupby(0)[2, 4].mean() - result3 = df.groupby(0)[[2, 4]].mean() + result2 = df.groupby(0)[[2, 4]].mean() expected = df.loc[:, [0, 2, 4]].groupby(0).mean() tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected) + + # per GH 23566 this should raise a FutureWarning + with tm.assert_produces_warning(FutureWarning): + df.groupby(0)[2, 4].mean() + + def test_getitem_single_list_of_columns(self, df): + # per GH 23566 this should raise a FutureWarning + with 
tm.assert_produces_warning(FutureWarning): + df.groupby("A")["C", "D"].mean() + + def test_getitem_single_column(self): + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.randn(8), + "D": np.random.randn(8), + "E": np.random.randn(8), + } + ) + + result = df.groupby("A")["C"].mean() + + as_frame = df.loc[:, ["A", "C"]].groupby("A").mean() + as_series = as_frame.iloc[:, 0] + expected = as_series + + tm.assert_series_equal(result, expected) # grouping diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 2a82b39b646c0..27dd314f0df8e 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -319,7 +319,7 @@ def test_dispatch_transform(tsframe): def test_transform_select_columns(df): f = lambda x: x.mean() - result = df.groupby("A")["C", "D"].transform(f) + result = df.groupby("A")[["C", "D"]].transform(f) selection = df[["C", "D"]] expected = selection.groupby(df["A"]).transform(f)
- [x] closes #23566 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30546
2019-12-29T20:11:10Z
2020-01-03T02:13:49Z
2020-01-03T02:13:48Z
2020-01-03T03:05:19Z
CLN: Remove dead IntervalIndex code
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index ce0716e36cdf3..52df491725504 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -103,19 +103,6 @@ def _get_prev_label(label): raise TypeError(f"cannot determine next label for type {repr(type(label))}") -def _get_interval_closed_bounds(interval): - """ - Given an Interval or IntervalIndex, return the corresponding interval with - closed bounds. - """ - left, right = interval.left, interval.right - if interval.open_left: - left = _get_next_label(left) - if interval.open_right: - right = _get_prev_label(right) - return left, right - - def _new_IntervalIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have @@ -675,26 +662,6 @@ def _convert_list_indexer(self, keyarr, kind=None): return locs - def _maybe_cast_indexed(self, key): - """ - we need to cast the key, which could be a scalar - or an array-like to the type of our subtype - """ - if isinstance(key, IntervalIndex): - return key - - subtype = self.dtype.subtype - if is_float_dtype(subtype): - if is_integer(key): - key = float(key) - elif isinstance(key, (np.ndarray, Index)): - key = key.astype("float64") - elif is_integer_dtype(subtype): - if is_integer(key): - key = int(key) - - return key - def _can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. 
@@ -827,34 +794,6 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False): return sub_idx._searchsorted_monotonic(label, side) - def _find_non_overlapping_monotonic_bounds(self, key): - if isinstance(key, IntervalMixin): - start = self._searchsorted_monotonic( - key.left, "left", exclude_label=key.open_left - ) - stop = self._searchsorted_monotonic( - key.right, "right", exclude_label=key.open_right - ) - elif isinstance(key, slice): - # slice - start, stop = key.start, key.stop - if (key.step or 1) != 1: - raise NotImplementedError("cannot slice with a slice step") - if start is None: - start = 0 - else: - start = self._searchsorted_monotonic(start, "left") - if stop is None: - stop = len(self) - else: - stop = self._searchsorted_monotonic(stop, "right") - else: - # scalar or index-like - - start = self._searchsorted_monotonic(key, "left") - stop = self._searchsorted_monotonic(key, "right") - return start, stop - def get_loc( self, key: Any, method: Optional[str] = None, tolerance=None ) -> Union[int, slice, np.ndarray]:
xref https://github.com/pandas-dev/pandas/pull/30459#issuecomment-568992710
https://api.github.com/repos/pandas-dev/pandas/pulls/30545
2019-12-29T19:27:11Z
2019-12-30T13:09:20Z
2019-12-30T13:09:20Z
2019-12-30T21:47:31Z
TST: XFAIL Travis read_html tests
diff --git a/.travis.yml b/.travis.yml index 0c7740295b637..25b7cd02a6599 100644 --- a/.travis.yml +++ b/.travis.yml @@ -48,17 +48,12 @@ matrix: - mysql - postgresql - # In allow_failures - env: - JOB="3.6, slow" ENV_FILE="ci/deps/travis-36-slow.yaml" PATTERN="slow" SQL="1" services: - mysql - postgresql - allow_failures: - - env: - - JOB="3.6, slow" ENV_FILE="ci/deps/travis-36-slow.yaml" PATTERN="slow" SQL="1" - before_install: - echo "before_install" # set non-blocking IO on travis diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index 2bb2b00319382..48ac50c001715 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -13,7 +13,7 @@ dependencies: - pytest-azurepipelines # pandas dependencies - - beautifulsoup4==4.6.0 + - beautifulsoup4=4.6.0 - bottleneck=1.2.* - lxml - matplotlib=2.2.2 diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index bc26615d1aad5..2bb412cf6eab5 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -383,7 +383,15 @@ def test_thousands_macau_stats(self, datapath): assert not any(s.isna().any() for _, s in df.items()) @pytest.mark.slow - def test_thousands_macau_index_col(self, datapath): + def test_thousands_macau_index_col(self, datapath, request): + # https://github.com/pandas-dev/pandas/issues/29622 + # This tests fails for bs4 >= 4.8.0 - so handle xfail accordingly + if self.read_html.keywords.get("flavor") == "bs4" and td.safe_import( + "bs4", "4.8.0" + ): + reason = "fails for bs4 version >= 4.8.0" + request.node.add_marker(pytest.mark.xfail(reason=reason)) + all_non_nan_table_index = -2 macau_data = datapath("io", "data", "html", "macau.html") dfs = self.read_html(macau_data, index_col=0, header=0)
- [x] xref #29622
https://api.github.com/repos/pandas-dev/pandas/pulls/30544
2019-12-29T18:56:08Z
2019-12-31T17:22:06Z
2019-12-31T17:22:06Z
2019-12-31T17:22:13Z
REF: Refactor window/test_moments.py
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py index 1dfc0f34b2b8d..c3648bc619c50 100644 --- a/pandas/tests/window/common.py +++ b/pandas/tests/window/common.py @@ -3,7 +3,8 @@ import numpy as np from numpy.random import randn -from pandas import DataFrame, Series, bdate_range +from pandas import DataFrame, Series, bdate_range, notna +import pandas.util.testing as tm N, K = 100, 10 @@ -21,3 +22,329 @@ def _create_data(self): self.rng = bdate_range(datetime(2009, 1, 1), periods=N) self.series = Series(arr.copy(), index=self.rng) self.frame = DataFrame(randn(N, K), index=self.rng, columns=np.arange(K)) + + +# create the data only once as we are not setting it +def _create_consistency_data(): + def create_series(): + return [ + Series(dtype=object), + Series([np.nan]), + Series([np.nan, np.nan]), + Series([3.0]), + Series([np.nan, 3.0]), + Series([3.0, np.nan]), + Series([1.0, 3.0]), + Series([2.0, 2.0]), + Series([3.0, 1.0]), + Series( + [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan] + ), + Series( + [ + np.nan, + 5.0, + 5.0, + 5.0, + np.nan, + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + ] + ), + Series( + [ + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + ] + ), + Series( + [ + np.nan, + 3.0, + np.nan, + 3.0, + 4.0, + 5.0, + 6.0, + np.nan, + np.nan, + 7.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + np.nan, + 5.0, + np.nan, + 2.0, + 4.0, + 0.0, + 9.0, + np.nan, + np.nan, + 3.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + 2.0, + 3.0, + np.nan, + 3.0, + 4.0, + 5.0, + 6.0, + np.nan, + np.nan, + 7.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + 2.0, + 5.0, + np.nan, + 2.0, + 4.0, + 0.0, + 9.0, + np.nan, + np.nan, + 3.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series(range(10)), + Series(range(20, 0, -2)), + ] + + def create_dataframes(): + return [ + DataFrame(), + DataFrame(columns=["a"]), + DataFrame(columns=["a", 
"a"]), + DataFrame(columns=["a", "b"]), + DataFrame(np.arange(10).reshape((5, 2))), + DataFrame(np.arange(25).reshape((5, 5))), + DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]), + ] + [DataFrame(s) for s in create_series()] + + def is_constant(x): + values = x.values.ravel() + return len(set(values[notna(values)])) == 1 + + def no_nans(x): + return x.notna().all().all() + + # data is a tuple(object, is_constant, no_nans) + data = create_series() + create_dataframes() + + return [(x, is_constant(x), no_nans(x)) for x in data] + + +_consistency_data = _create_consistency_data() + + +class ConsistencyBase(Base): + base_functions = [ + (lambda v: Series(v).count(), None, "count"), + (lambda v: Series(v).max(), None, "max"), + (lambda v: Series(v).min(), None, "min"), + (lambda v: Series(v).sum(), None, "sum"), + (lambda v: Series(v).mean(), None, "mean"), + (lambda v: Series(v).std(), 1, "std"), + (lambda v: Series(v).cov(Series(v)), None, "cov"), + (lambda v: Series(v).corr(Series(v)), None, "corr"), + (lambda v: Series(v).var(), 1, "var"), + # restore once GH 8086 is fixed + # lambda v: Series(v).skew(), 3, 'skew'), + # (lambda v: Series(v).kurt(), 4, 'kurt'), + # restore once GH 8084 is fixed + # lambda v: Series(v).quantile(0.3), None, 'quantile'), + (lambda v: Series(v).median(), None, "median"), + (np.nanmax, 1, "max"), + (np.nanmin, 1, "min"), + (np.nansum, 1, "sum"), + (np.nanmean, 1, "mean"), + (lambda v: np.nanstd(v, ddof=1), 1, "std"), + (lambda v: np.nanvar(v, ddof=1), 1, "var"), + (np.nanmedian, 1, "median"), + ] + no_nan_functions = [ + (np.max, None, "max"), + (np.min, None, "min"), + (np.sum, None, "sum"), + (np.mean, None, "mean"), + (lambda v: np.std(v, ddof=1), 1, "std"), + (lambda v: np.var(v, ddof=1), 1, "var"), + (np.median, None, "median"), + ] + + def _create_data(self): + super()._create_data() + self.data = _consistency_data + + def _test_moments_consistency( + self, + min_periods, + count, + mean, + mock_mean, + 
corr, + var_unbiased=None, + std_unbiased=None, + cov_unbiased=None, + var_biased=None, + std_biased=None, + cov_biased=None, + var_debiasing_factors=None, + ): + def _non_null_values(x): + values = x.values.ravel() + return set(values[notna(values)].tolist()) + + for (x, is_constant, no_nans) in self.data: + count_x = count(x) + mean_x = mean(x) + + if mock_mean: + # check that mean equals mock_mean + expected = mock_mean(x) + tm.assert_equal(mean_x, expected.astype("float64")) + + # check that correlation of a series with itself is either 1 or NaN + corr_x_x = corr(x, x) + + # assert _non_null_values(corr_x_x).issubset(set([1.])) + # restore once rolling_cov(x, x) is identically equal to var(x) + + if is_constant: + exp = x.max() if isinstance(x, Series) else x.max().max() + + # check mean of constant series + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = exp + tm.assert_equal(mean_x, expected) + + # check correlation of constant series with itself is NaN + expected[:] = np.nan + tm.assert_equal(corr_x_x, expected) + + if var_unbiased and var_biased and var_debiasing_factors: + # check variance debiasing factors + var_unbiased_x = var_unbiased(x) + var_biased_x = var_biased(x) + var_debiasing_factors_x = var_debiasing_factors(x) + tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) + + for (std, var, cov) in [ + (std_biased, var_biased, cov_biased), + (std_unbiased, var_unbiased, cov_unbiased), + ]: + + # check that var(x), std(x), and cov(x) are all >= 0 + var_x = var(x) + std_x = std(x) + assert not (var_x < 0).any().any() + assert not (std_x < 0).any().any() + if cov: + cov_x_x = cov(x, x) + assert not (cov_x_x < 0).any().any() + + # check that var(x) == cov(x, x) + tm.assert_equal(var_x, cov_x_x) + + # check that var(x) == std(x)^2 + tm.assert_equal(var_x, std_x * std_x) + + if var is var_biased: + # check that biased var(x) == mean(x^2) - mean(x)^2 + mean_x2 = mean(x * x) + tm.assert_equal(var_x, mean_x2 - (mean_x * 
mean_x)) + + if is_constant: + # check that variance of constant series is identically 0 + assert not (var_x > 0).any().any() + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = 0.0 + if var is var_unbiased: + expected[count_x < 2] = np.nan + tm.assert_equal(var_x, expected) + + if isinstance(x, Series): + for (y, is_constant, no_nans) in self.data: + if not x.isna().equals(y.isna()): + # can only easily test two Series with similar + # structure + continue + + # check that cor(x, y) is symmetric + corr_x_y = corr(x, y) + corr_y_x = corr(y, x) + tm.assert_equal(corr_x_y, corr_y_x) + + if cov: + # check that cov(x, y) is symmetric + cov_x_y = cov(x, y) + cov_y_x = cov(y, x) + tm.assert_equal(cov_x_y, cov_y_x) + + # check that cov(x, y) == (var(x+y) - var(x) - + # var(y)) / 2 + var_x_plus_y = var(x + y) + var_y = var(y) + tm.assert_equal( + cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y) + ) + + # check that corr(x, y) == cov(x, y) / (std(x) * + # std(y)) + std_y = std(y) + tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) + + if cov is cov_biased: + # check that biased cov(x, y) == mean(x*y) - + # mean(x)*mean(y) + mean_y = mean(y) + mean_x_times_y = mean(x * y) + tm.assert_equal( + cov_x_y, mean_x_times_y - (mean_x * mean_y) + ) + + def _check_pairwise_moment(self, dispatch, name, **kwargs): + def get_result(obj, obj2=None): + return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) + + result = get_result(self.frame) + result = result.loc[(slice(None), 1), 5] + result.index = result.index.droplevel(1) + expected = get_result(self.frame[1], self.frame[5]) + tm.assert_series_equal(result, expected, check_names=False) diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py new file mode 100644 index 0000000000000..bf2bd1420b7f4 --- /dev/null +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -0,0 +1,428 @@ +import numpy as np +from numpy.random import randn +import pytest + +import 
pandas as pd +from pandas import DataFrame, Series, concat +from pandas.tests.window.common import Base, ConsistencyBase +import pandas.util.testing as tm + + +@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") +class TestMoments(Base): + def setup_method(self, method): + self._create_data() + + def test_ewma(self): + self._check_ew(name="mean") + + vals = pd.Series(np.zeros(1000)) + vals[5] = 1 + result = vals.ewm(span=100, adjust=False).mean().sum() + assert np.abs(result - 1) < 1e-2 + + @pytest.mark.parametrize("adjust", [True, False]) + @pytest.mark.parametrize("ignore_na", [True, False]) + def test_ewma_cases(self, adjust, ignore_na): + # try adjust/ignore_na args matrix + + s = Series([1.0, 2.0, 4.0, 8.0]) + + if adjust: + expected = Series([1.0, 1.6, 2.736842, 4.923077]) + else: + expected = Series([1.0, 1.333333, 2.222222, 4.148148]) + + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + tm.assert_series_equal(result, expected) + + def test_ewma_nan_handling(self): + s = Series([1.0] + [np.nan] * 5 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([1.0] * len(s))) + + s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) + + # GH 7603 + s0 = Series([np.nan, 1.0, 101.0]) + s1 = Series([1.0, np.nan, 101.0]) + s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]) + s3 = Series([1.0, np.nan, 101.0, 50.0]) + com = 2.0 + alpha = 1.0 / (1.0 + com) + + def simple_wma(s, w): + return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill") + + for (s, adjust, ignore_na, w) in [ + (s0, True, False, [np.nan, (1.0 - alpha), 1.0]), + (s0, True, True, [np.nan, (1.0 - alpha), 1.0]), + (s0, False, False, [np.nan, (1.0 - alpha), alpha]), + (s0, False, True, [np.nan, (1.0 - alpha), alpha]), + (s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]), + (s1, True, True, [(1.0 - alpha), np.nan, 1.0]), + 
(s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]), + (s1, False, True, [(1.0 - alpha), np.nan, alpha]), + ( + s2, + True, + False, + [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan], + ), + (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]), + ( + s2, + False, + False, + [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan], + ), + (s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]), + (s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]), + (s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]), + ( + s3, + False, + False, + [ + (1.0 - alpha) ** 3, + np.nan, + (1.0 - alpha) * alpha, + alpha * ((1.0 - alpha) ** 2 + alpha), + ], + ), + ( + s3, + False, + True, + [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha], + ), + ]: + expected = simple_wma(s, Series(w)) + result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean() + + tm.assert_series_equal(result, expected) + if ignore_na is False: + # check that ignore_na defaults to False + result = s.ewm(com=com, adjust=adjust).mean() + tm.assert_series_equal(result, expected) + + def test_ewmvar(self): + self._check_ew(name="var") + + def test_ewmvol(self): + self._check_ew(name="vol") + + def test_ewma_span_com_args(self): + A = self.series.ewm(com=9.5).mean() + B = self.series.ewm(span=20).mean() + tm.assert_almost_equal(A, B) + + with pytest.raises(ValueError): + self.series.ewm(com=9.5, span=20) + with pytest.raises(ValueError): + self.series.ewm().mean() + + def test_ewma_halflife_arg(self): + A = self.series.ewm(com=13.932726172912965).mean() + B = self.series.ewm(halflife=10.0).mean() + tm.assert_almost_equal(A, B) + + with pytest.raises(ValueError): + self.series.ewm(span=20, halflife=50) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, halflife=50) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, span=20, halflife=50) + with pytest.raises(ValueError): + self.series.ewm() + + def 
test_ewm_alpha(self): + # GH 10789 + s = Series(self.arr) + a = s.ewm(alpha=0.61722699889169674).mean() + b = s.ewm(com=0.62014947789973052).mean() + c = s.ewm(span=2.240298955799461).mean() + d = s.ewm(halflife=0.721792864318).mean() + tm.assert_series_equal(a, b) + tm.assert_series_equal(a, c) + tm.assert_series_equal(a, d) + + def test_ewm_alpha_arg(self): + # GH 10789 + s = self.series + with pytest.raises(ValueError): + s.ewm() + with pytest.raises(ValueError): + s.ewm(com=10.0, alpha=0.5) + with pytest.raises(ValueError): + s.ewm(span=10.0, alpha=0.5) + with pytest.raises(ValueError): + s.ewm(halflife=10.0, alpha=0.5) + + def test_ewm_domain_checks(self): + # GH 12492 + s = Series(self.arr) + msg = "comass must satisfy: comass >= 0" + with pytest.raises(ValueError, match=msg): + s.ewm(com=-0.1) + s.ewm(com=0.0) + s.ewm(com=0.1) + + msg = "span must satisfy: span >= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(span=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.0) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.9) + s.ewm(span=1.0) + s.ewm(span=1.1) + + msg = "halflife must satisfy: halflife > 0" + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=0.0) + s.ewm(halflife=0.1) + + msg = "alpha must satisfy: 0 < alpha <= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=0.0) + s.ewm(alpha=0.1) + s.ewm(alpha=1.0) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=1.1) + + @pytest.mark.parametrize("method", ["mean", "vol", "var"]) + def test_ew_empty_series(self, method): + vals = pd.Series([], dtype=np.float64) + + ewm = vals.ewm(3) + result = getattr(ewm, method)() + tm.assert_almost_equal(result, vals) + + def _check_ew(self, name=None, preserve_nan=False): + series_result = getattr(self.series.ewm(com=10), name)() + assert isinstance(series_result, Series) + + 
frame_result = getattr(self.frame.ewm(com=10), name)() + assert type(frame_result) == DataFrame + + result = getattr(self.series.ewm(com=10), name)() + if preserve_nan: + assert result[self._nan_locs].isna().all() + + # excluding NaNs correctly + arr = randn(50) + arr[:10] = np.NaN + arr[-10:] = np.NaN + s = Series(arr) + + # check min_periods + # GH 7898 + result = getattr(s.ewm(com=50, min_periods=2), name)() + assert result[:11].isna().all() + assert not result[11:].isna().any() + + for min_periods in (0, 1): + result = getattr(s.ewm(com=50, min_periods=min_periods), name)() + if name == "mean": + assert result[:10].isna().all() + assert not result[10:].isna().any() + else: + # ewm.std, ewm.vol, ewm.var (with bias=False) require at least + # two values + assert result[:11].isna().all() + assert not result[11:].isna().any() + + # check series of length 0 + result = getattr( + Series(dtype=object).ewm(com=50, min_periods=min_periods), name + )() + tm.assert_series_equal(result, Series(dtype="float64")) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() + if name == "mean": + tm.assert_series_equal(result, Series([1.0])) + else: + # ewm.std, ewm.vol, ewm.var with bias=False require at least + # two values + tm.assert_series_equal(result, Series([np.NaN])) + + # pass in ints + result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() + assert result2.dtype == np.float_ + + +class TestEwmMomentsConsistency(ConsistencyBase): + def setup_method(self, method): + self._create_data() + + def test_ewmcov(self): + self._check_binary_ew("cov") + + def test_ewmcov_pairwise(self): + self._check_pairwise_moment("ewm", "cov", span=10, min_periods=5) + + def test_ewmcorr(self): + self._check_binary_ew("corr") + + def test_ewmcorr_pairwise(self): + self._check_pairwise_moment("ewm", "corr", span=10, min_periods=5) + + def _check_binary_ew(self, name): + def func(A, B, com, **kwargs): + return getattr(A.ewm(com, **kwargs), 
name)(B) + + A = Series(randn(50), index=np.arange(50)) + B = A[2:] + randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN + + result = func(A, B, 20, min_periods=5) + assert np.isnan(result.values[:14]).all() + assert not np.isnan(result.values[14:]).any() + + # GH 7898 + for min_periods in (0, 1, 2): + result = func(A, B, 20, min_periods=min_periods) + # binary functions (ewmcov, ewmcorr) with bias=False require at + # least two values + assert np.isnan(result.values[:11]).all() + assert not np.isnan(result.values[11:]).any() + + # check series of length 0 + empty = Series([], dtype=np.float64) + result = func(empty, empty, 50, min_periods=min_periods) + tm.assert_series_equal(result, empty) + + # check series of length 1 + result = func(Series([1.0]), Series([1.0]), 50, min_periods=min_periods) + tm.assert_series_equal(result, Series([np.NaN])) + + msg = "Input arrays must be of the same type!" + # exception raised is Exception + with pytest.raises(Exception, match=msg): + func(A, randn(50), 20, min_periods=5) + + @pytest.mark.slow + @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) + @pytest.mark.parametrize("adjust", [True, False]) + @pytest.mark.parametrize("ignore_na", [True, False]) + def test_ewm_consistency(self, min_periods, adjust, ignore_na): + def _weights(s, com, adjust, ignore_na): + if isinstance(s, DataFrame): + if not len(s.columns): + return DataFrame(index=s.index, columns=s.columns) + w = concat( + [ + _weights( + s.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na + ) + for i, _ in enumerate(s.columns) + ], + axis=1, + ) + w.index = s.index + w.columns = s.columns + return w + + w = Series(np.nan, index=s.index) + alpha = 1.0 / (1.0 + com) + if ignore_na: + w[s.notna()] = _weights( + s[s.notna()], com=com, adjust=adjust, ignore_na=False + ) + elif adjust: + for i in range(len(s)): + if s.iat[i] == s.iat[i]: + w.iat[i] = pow(1.0 / (1.0 - alpha), i) + else: + sum_wts = 0.0 + prev_i = -1 + for i in range(len(s)): + if s.iat[i] == 
s.iat[i]: + if prev_i == -1: + w.iat[i] = 1.0 + else: + w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, i - prev_i) + sum_wts += w.iat[i] + prev_i = i + return w + + def _variance_debiasing_factors(s, com, adjust, ignore_na): + weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) + cum_sum = weights.cumsum().fillna(method="ffill") + cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill") + numerator = cum_sum * cum_sum + denominator = numerator - cum_sum_sq + denominator[denominator <= 0.0] = np.nan + return numerator / denominator + + def _ewma(s, com, min_periods, adjust, ignore_na): + weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) + result = ( + s.multiply(weights) + .cumsum() + .divide(weights.cumsum()) + .fillna(method="ffill") + ) + result[ + s.expanding().count() < (max(min_periods, 1) if min_periods else 1) + ] = np.nan + return result + + com = 3.0 + # test consistency between different ewm* moments + self._test_moments_consistency( + min_periods=min_periods, + count=lambda x: x.expanding().count(), + mean=lambda x: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean(), + mock_mean=lambda x: _ewma( + x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ), + corr=lambda x, y: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).corr(y), + var_unbiased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=False) + ), + std_unbiased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).std(bias=False) + ), + cov_unbiased=lambda x, y: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).cov(y, bias=False) + ), + var_biased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=True) + ), + std_biased=lambda x: x.ewm( + com=com, min_periods=min_periods, 
adjust=adjust, ignore_na=ignore_na + ).std(bias=True), + cov_biased=lambda x, y: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).cov(y, bias=True) + ), + var_debiasing_factors=lambda x: ( + _variance_debiasing_factors( + x, com=com, adjust=adjust, ignore_na=ignore_na + ) + ), + ) diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py new file mode 100644 index 0000000000000..3361ecab28669 --- /dev/null +++ b/pandas/tests/window/moments/test_moments_expanding.py @@ -0,0 +1,387 @@ +import warnings + +import numpy as np +from numpy.random import randn +import pytest + +from pandas import DataFrame, Index, MultiIndex, Series, isna, notna +from pandas.tests.window.common import ConsistencyBase +import pandas.util.testing as tm + + +class TestExpandingMomentsConsistency(ConsistencyBase): + def setup_method(self, method): + self._create_data() + + def test_expanding_apply_args_kwargs(self, raw): + def mean_w_arg(x, const): + return np.mean(x) + const + + df = DataFrame(np.random.rand(20, 3)) + + expected = df.expanding().apply(np.mean, raw=raw) + 20.0 + + result = df.expanding().apply(mean_w_arg, raw=raw, args=(20,)) + tm.assert_frame_equal(result, expected) + + result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20}) + tm.assert_frame_equal(result, expected) + + def test_expanding_corr(self): + A = self.series.dropna() + B = (A + randn(len(A)))[:-5] + + result = A.expanding().corr(B) + + rolling_result = A.rolling(window=len(A), min_periods=1).corr(B) + + tm.assert_almost_equal(rolling_result, result) + + def test_expanding_count(self): + result = self.series.expanding().count() + tm.assert_almost_equal( + result, self.series.rolling(window=len(self.series)).count() + ) + + def test_expanding_quantile(self): + result = self.series.expanding().quantile(0.5) + + rolling_result = self.series.rolling( + window=len(self.series), min_periods=1 + 
).quantile(0.5) + + tm.assert_almost_equal(result, rolling_result) + + def test_expanding_cov(self): + A = self.series + B = (A + randn(len(A)))[:-5] + + result = A.expanding().cov(B) + + rolling_result = A.rolling(window=len(A), min_periods=1).cov(B) + + tm.assert_almost_equal(rolling_result, result) + + def test_expanding_cov_pairwise(self): + result = self.frame.expanding().corr() + + rolling_result = self.frame.rolling( + window=len(self.frame), min_periods=1 + ).corr() + + tm.assert_frame_equal(result, rolling_result) + + def test_expanding_corr_pairwise(self): + result = self.frame.expanding().corr() + + rolling_result = self.frame.rolling( + window=len(self.frame), min_periods=1 + ).corr() + tm.assert_frame_equal(result, rolling_result) + + def test_expanding_cov_diff_index(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.expanding().cov(s2) + expected = Series([None, None, 2.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.expanding().cov(s2a) + tm.assert_series_equal(result, expected) + + s1 = Series([7, 8, 10], index=[0, 1, 3]) + s2 = Series([7, 9, 10], index=[0, 2, 3]) + result = s1.expanding().cov(s2) + expected = Series([None, None, None, 4.5]) + tm.assert_series_equal(result, expected) + + def test_expanding_corr_diff_index(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.expanding().corr(s2) + expected = Series([None, None, 1.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.expanding().corr(s2a) + tm.assert_series_equal(result, expected) + + s1 = Series([7, 8, 10], index=[0, 1, 3]) + s2 = Series([7, 9, 10], index=[0, 2, 3]) + result = s1.expanding().corr(s2) + expected = Series([None, None, None, 1.0]) + tm.assert_series_equal(result, expected) + + def test_expanding_cov_pairwise_diff_length(self): + # GH 7512 
+ df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo")) + df1a = DataFrame( + [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo") + ) + df2 = DataFrame( + [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo") + ) + df2a = DataFrame( + [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo") + ) + # TODO: xref gh-15826 + # .loc is not preserving the names + result1 = df1.expanding().cov(df2, pairwise=True).loc[2] + result2 = df1.expanding().cov(df2a, pairwise=True).loc[2] + result3 = df1a.expanding().cov(df2, pairwise=True).loc[2] + result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2] + expected = DataFrame( + [[-3.0, -6.0], [-5.0, -10.0]], + columns=Index(["A", "B"], name="foo"), + index=Index(["X", "Y"], name="foo"), + ) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected) + tm.assert_frame_equal(result4, expected) + + def test_expanding_corr_pairwise_diff_length(self): + # GH 7512 + df1 = DataFrame( + [[1, 2], [3, 2], [3, 4]], + columns=["A", "B"], + index=Index(range(3), name="bar"), + ) + df1a = DataFrame( + [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"] + ) + df2 = DataFrame( + [[5, 6], [None, None], [2, 1]], + columns=["X", "Y"], + index=Index(range(3), name="bar"), + ) + df2a = DataFrame( + [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"] + ) + result1 = df1.expanding().corr(df2, pairwise=True).loc[2] + result2 = df1.expanding().corr(df2a, pairwise=True).loc[2] + result3 = df1a.expanding().corr(df2, pairwise=True).loc[2] + result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2] + expected = DataFrame( + [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"]) + ) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected) + tm.assert_frame_equal(result4, expected) + + 
@pytest.mark.parametrize( + "func,static_comp", + [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], + ids=["sum", "mean", "max", "min"], + ) + def test_expanding_func(self, func, static_comp): + def expanding_func(x, min_periods=1, center=False, axis=0): + exp = x.expanding(min_periods=min_periods, center=center, axis=axis) + return getattr(exp, func)() + + self._check_expanding(expanding_func, static_comp, preserve_nan=False) + + def test_expanding_apply(self, raw): + def expanding_mean(x, min_periods=1): + + exp = x.expanding(min_periods=min_periods) + result = exp.apply(lambda x: x.mean(), raw=raw) + return result + + # TODO(jreback), needed to add preserve_nan=False + # here to make this pass + self._check_expanding(expanding_mean, np.mean, preserve_nan=False) + + ser = Series([], dtype=np.float64) + tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean(), raw=raw)) + + # GH 8080 + s = Series([None, None, None]) + result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw) + expected = Series([1.0, 2.0, 3.0]) + tm.assert_series_equal(result, expected) + + def _check_expanding( + self, func, static_comp, has_min_periods=True, preserve_nan=True + ): + + series_result = func(self.series) + assert isinstance(series_result, Series) + frame_result = func(self.frame) + assert isinstance(frame_result, DataFrame) + + result = func(self.series) + tm.assert_almost_equal(result[10], static_comp(self.series[:11])) + + if preserve_nan: + assert result.iloc[self._nan_locs].isna().all() + + ser = Series(randn(50)) + + if has_min_periods: + result = func(ser, min_periods=30) + assert result[:29].isna().all() + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) + + # min_periods is working correctly + result = func(ser, min_periods=15) + assert isna(result.iloc[13]) + assert notna(result.iloc[14]) + + ser2 = Series(randn(20)) + result = func(ser2, min_periods=5) + assert isna(result[3]) + assert notna(result[4]) + + # 
min_periods=0 + result0 = func(ser, min_periods=0) + result1 = func(ser, min_periods=1) + tm.assert_almost_equal(result0, result1) + else: + result = func(ser) + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) + + def test_moment_functions_zero_length(self): + # GH 8056 + s = Series(dtype=np.float64) + s_expected = s + df1 = DataFrame() + df1_expected = df1 + df2 = DataFrame(columns=["a"]) + df2["a"] = df2["a"].astype("float64") + df2_expected = df2 + + functions = [ + lambda x: x.expanding().count(), + lambda x: x.expanding(min_periods=5).cov(x, pairwise=False), + lambda x: x.expanding(min_periods=5).corr(x, pairwise=False), + lambda x: x.expanding(min_periods=5).max(), + lambda x: x.expanding(min_periods=5).min(), + lambda x: x.expanding(min_periods=5).sum(), + lambda x: x.expanding(min_periods=5).mean(), + lambda x: x.expanding(min_periods=5).std(), + lambda x: x.expanding(min_periods=5).var(), + lambda x: x.expanding(min_periods=5).skew(), + lambda x: x.expanding(min_periods=5).kurt(), + lambda x: x.expanding(min_periods=5).quantile(0.5), + lambda x: x.expanding(min_periods=5).median(), + lambda x: x.expanding(min_periods=5).apply(sum, raw=False), + lambda x: x.expanding(min_periods=5).apply(sum, raw=True), + ] + for f in functions: + try: + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) + + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) + except (ImportError): + + # scipy needed for rolling_window + continue + + def test_moment_functions_zero_length_pairwise(self): + + df1 = DataFrame() + df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) + df2["a"] = df2["a"].astype("float64") + + df1_expected = DataFrame( + index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]) + ) + df2_expected = DataFrame( + index=MultiIndex.from_product( + [df2.index, df2.columns], names=["bar", "foo"] + ), + 
columns=Index(["a"], name="foo"), + dtype="float64", + ) + functions = [ + lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)), + lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)), + ] + for f in functions: + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) + + @pytest.mark.slow + @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) + def test_expanding_consistency(self, min_periods): + + # suppress warnings about empty slices, as we are deliberately testing + # with empty/0-length Series/DataFrames + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".*(empty slice|0 for slice).*", + category=RuntimeWarning, + ) + + # test consistency between different expanding_* moments + self._test_moments_consistency( + min_periods=min_periods, + count=lambda x: x.expanding().count(), + mean=lambda x: x.expanding(min_periods=min_periods).mean(), + mock_mean=lambda x: x.expanding(min_periods=min_periods).sum() + / x.expanding().count(), + corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), + var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), + std_unbiased=lambda x: x.expanding(min_periods=min_periods).std(), + cov_unbiased=lambda x, y: x.expanding(min_periods=min_periods).cov(y), + var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), + std_biased=lambda x: x.expanding(min_periods=min_periods).std(ddof=0), + cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov( + y, ddof=0 + ), + var_debiasing_factors=lambda x: ( + x.expanding().count() + / (x.expanding().count() - 1.0).replace(0.0, np.nan) + ), + ) + + # test consistency between expanding_xyz() and either (a) + # expanding_apply of Series.xyz(), or (b) expanding_apply of + # np.nanxyz() + for (x, is_constant, no_nans) in self.data: + functions = self.base_functions + + # GH 8269 + if no_nans: + functions = 
self.base_functions + self.no_nan_functions + for (f, require_min_periods, name) in functions: + expanding_f = getattr(x.expanding(min_periods=min_periods), name) + + if ( + require_min_periods + and (min_periods is not None) + and (min_periods < require_min_periods) + ): + continue + + if name == "count": + expanding_f_result = expanding_f() + expanding_apply_f_result = x.expanding(min_periods=0).apply( + func=f, raw=True + ) + else: + if name in ["cov", "corr"]: + expanding_f_result = expanding_f(pairwise=False) + else: + expanding_f_result = expanding_f() + expanding_apply_f_result = x.expanding( + min_periods=min_periods + ).apply(func=f, raw=True) + + # GH 9422 + if name in ["sum", "prod"]: + tm.assert_equal(expanding_f_result, expanding_apply_f_result) diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/moments/test_moments_rolling.py similarity index 55% rename from pandas/tests/window/test_moments.py rename to pandas/tests/window/moments/test_moments_rolling.py index b1c5fc429cc03..631b13f874ca7 100644 --- a/pandas/tests/window/test_moments.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -9,9 +9,9 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Index, Series, concat, isna, notna +from pandas import DataFrame, Index, Series, isna, notna from pandas.core.window.common import _flex_binary_moment -from pandas.tests.window.common import Base +from pandas.tests.window.common import Base, ConsistencyBase import pandas.util.testing as tm import pandas.tseries.offsets as offsets @@ -915,400 +915,6 @@ def get_result(obj, window, min_periods=None, center=False): tm.assert_series_equal(series_xp, series_rs) tm.assert_frame_equal(frame_xp, frame_rs) - def test_ewma(self): - self._check_ew(name="mean") - - vals = pd.Series(np.zeros(1000)) - vals[5] = 1 - result = vals.ewm(span=100, adjust=False).mean().sum() - assert np.abs(result - 1) < 1e-2 - - @pytest.mark.parametrize("adjust", [True, 
False]) - @pytest.mark.parametrize("ignore_na", [True, False]) - def test_ewma_cases(self, adjust, ignore_na): - # try adjust/ignore_na args matrix - - s = Series([1.0, 2.0, 4.0, 8.0]) - - if adjust: - expected = Series([1.0, 1.6, 2.736842, 4.923077]) - else: - expected = Series([1.0, 1.333333, 2.222222, 4.148148]) - - result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() - tm.assert_series_equal(result, expected) - - def test_ewma_nan_handling(self): - s = Series([1.0] + [np.nan] * 5 + [1.0]) - result = s.ewm(com=5).mean() - tm.assert_series_equal(result, Series([1.0] * len(s))) - - s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) - result = s.ewm(com=5).mean() - tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) - - # GH 7603 - s0 = Series([np.nan, 1.0, 101.0]) - s1 = Series([1.0, np.nan, 101.0]) - s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]) - s3 = Series([1.0, np.nan, 101.0, 50.0]) - com = 2.0 - alpha = 1.0 / (1.0 + com) - - def simple_wma(s, w): - return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill") - - for (s, adjust, ignore_na, w) in [ - (s0, True, False, [np.nan, (1.0 - alpha), 1.0]), - (s0, True, True, [np.nan, (1.0 - alpha), 1.0]), - (s0, False, False, [np.nan, (1.0 - alpha), alpha]), - (s0, False, True, [np.nan, (1.0 - alpha), alpha]), - (s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]), - (s1, True, True, [(1.0 - alpha), np.nan, 1.0]), - (s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]), - (s1, False, True, [(1.0 - alpha), np.nan, alpha]), - ( - s2, - True, - False, - [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan], - ), - (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]), - ( - s2, - False, - False, - [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan], - ), - (s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]), - (s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]), - (s3, True, True, [(1.0 - alpha) ** 2, 
np.nan, (1.0 - alpha), 1.0]), - ( - s3, - False, - False, - [ - (1.0 - alpha) ** 3, - np.nan, - (1.0 - alpha) * alpha, - alpha * ((1.0 - alpha) ** 2 + alpha), - ], - ), - ( - s3, - False, - True, - [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha], - ), - ]: - expected = simple_wma(s, Series(w)) - result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean() - - tm.assert_series_equal(result, expected) - if ignore_na is False: - # check that ignore_na defaults to False - result = s.ewm(com=com, adjust=adjust).mean() - tm.assert_series_equal(result, expected) - - def test_ewmvar(self): - self._check_ew(name="var") - - def test_ewmvol(self): - self._check_ew(name="vol") - - def test_ewma_span_com_args(self): - A = self.series.ewm(com=9.5).mean() - B = self.series.ewm(span=20).mean() - tm.assert_almost_equal(A, B) - - with pytest.raises(ValueError): - self.series.ewm(com=9.5, span=20) - with pytest.raises(ValueError): - self.series.ewm().mean() - - def test_ewma_halflife_arg(self): - A = self.series.ewm(com=13.932726172912965).mean() - B = self.series.ewm(halflife=10.0).mean() - tm.assert_almost_equal(A, B) - - with pytest.raises(ValueError): - self.series.ewm(span=20, halflife=50) - with pytest.raises(ValueError): - self.series.ewm(com=9.5, halflife=50) - with pytest.raises(ValueError): - self.series.ewm(com=9.5, span=20, halflife=50) - with pytest.raises(ValueError): - self.series.ewm() - - def test_ewm_alpha(self): - # GH 10789 - s = Series(self.arr) - a = s.ewm(alpha=0.61722699889169674).mean() - b = s.ewm(com=0.62014947789973052).mean() - c = s.ewm(span=2.240298955799461).mean() - d = s.ewm(halflife=0.721792864318).mean() - tm.assert_series_equal(a, b) - tm.assert_series_equal(a, c) - tm.assert_series_equal(a, d) - - def test_ewm_alpha_arg(self): - # GH 10789 - s = self.series - with pytest.raises(ValueError): - s.ewm() - with pytest.raises(ValueError): - s.ewm(com=10.0, alpha=0.5) - with pytest.raises(ValueError): - s.ewm(span=10.0, alpha=0.5) - 
with pytest.raises(ValueError): - s.ewm(halflife=10.0, alpha=0.5) - - def test_ewm_domain_checks(self): - # GH 12492 - s = Series(self.arr) - msg = "comass must satisfy: comass >= 0" - with pytest.raises(ValueError, match=msg): - s.ewm(com=-0.1) - s.ewm(com=0.0) - s.ewm(com=0.1) - - msg = "span must satisfy: span >= 1" - with pytest.raises(ValueError, match=msg): - s.ewm(span=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(span=0.0) - with pytest.raises(ValueError, match=msg): - s.ewm(span=0.9) - s.ewm(span=1.0) - s.ewm(span=1.1) - - msg = "halflife must satisfy: halflife > 0" - with pytest.raises(ValueError, match=msg): - s.ewm(halflife=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(halflife=0.0) - s.ewm(halflife=0.1) - - msg = "alpha must satisfy: 0 < alpha <= 1" - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=0.0) - s.ewm(alpha=0.1) - s.ewm(alpha=1.0) - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=1.1) - - @pytest.mark.parametrize("method", ["mean", "vol", "var"]) - def test_ew_empty_series(self, method): - vals = pd.Series([], dtype=np.float64) - - ewm = vals.ewm(3) - result = getattr(ewm, method)() - tm.assert_almost_equal(result, vals) - - def _check_ew(self, name=None, preserve_nan=False): - series_result = getattr(self.series.ewm(com=10), name)() - assert isinstance(series_result, Series) - - frame_result = getattr(self.frame.ewm(com=10), name)() - assert type(frame_result) == DataFrame - - result = getattr(self.series.ewm(com=10), name)() - if preserve_nan: - assert result[self._nan_locs].isna().all() - - # excluding NaNs correctly - arr = randn(50) - arr[:10] = np.NaN - arr[-10:] = np.NaN - s = Series(arr) - - # check min_periods - # GH 7898 - result = getattr(s.ewm(com=50, min_periods=2), name)() - assert result[:11].isna().all() - assert not result[11:].isna().any() - - for min_periods in (0, 1): - result = getattr(s.ewm(com=50, 
min_periods=min_periods), name)() - if name == "mean": - assert result[:10].isna().all() - assert not result[10:].isna().any() - else: - # ewm.std, ewm.vol, ewm.var (with bias=False) require at least - # two values - assert result[:11].isna().all() - assert not result[11:].isna().any() - - # check series of length 0 - result = getattr( - Series(dtype=object).ewm(com=50, min_periods=min_periods), name - )() - tm.assert_series_equal(result, Series(dtype="float64")) - - # check series of length 1 - result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() - if name == "mean": - tm.assert_series_equal(result, Series([1.0])) - else: - # ewm.std, ewm.vol, ewm.var with bias=False require at least - # two values - tm.assert_series_equal(result, Series([np.NaN])) - - # pass in ints - result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() - assert result2.dtype == np.float_ - - -# create the data only once as we are not setting it -def _create_consistency_data(): - def create_series(): - return [ - Series(dtype=object), - Series([np.nan]), - Series([np.nan, np.nan]), - Series([3.0]), - Series([np.nan, 3.0]), - Series([3.0, np.nan]), - Series([1.0, 3.0]), - Series([2.0, 2.0]), - Series([3.0, 1.0]), - Series( - [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan] - ), - Series( - [ - np.nan, - 5.0, - 5.0, - 5.0, - np.nan, - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - ] - ), - Series( - [ - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - ] - ), - Series( - [ - np.nan, - 3.0, - np.nan, - 3.0, - 4.0, - 5.0, - 6.0, - np.nan, - np.nan, - 7.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - np.nan, - 5.0, - np.nan, - 2.0, - 4.0, - 0.0, - 9.0, - np.nan, - np.nan, - 3.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - 2.0, - 3.0, - np.nan, - 3.0, - 4.0, - 5.0, - 6.0, - np.nan, - np.nan, - 7.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - 2.0, - 5.0, - 
np.nan, - 2.0, - 4.0, - 0.0, - 9.0, - np.nan, - np.nan, - 3.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series(range(10)), - Series(range(20, 0, -2)), - ] - - def create_dataframes(): - return [ - DataFrame(), - DataFrame(columns=["a"]), - DataFrame(columns=["a", "a"]), - DataFrame(columns=["a", "b"]), - DataFrame(np.arange(10).reshape((5, 2))), - DataFrame(np.arange(25).reshape((5, 5))), - DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]), - ] + [DataFrame(s) for s in create_series()] - - def is_constant(x): - values = x.values.ravel() - return len(set(values[notna(values)])) == 1 - - def no_nans(x): - return x.notna().all().all() - - # data is a tuple(object, is_constant, no_nans) - data = create_series() + create_dataframes() - - return [(x, is_constant(x), no_nans(x)) for x in data] - - -_consistency_data = _create_consistency_data() - def _rolling_consistency_cases(): for window in [1, 2, 3, 10, 20]: @@ -1319,363 +925,10 @@ def _rolling_consistency_cases(): yield window, min_periods, center -class TestMomentsConsistency(Base): - base_functions = [ - (lambda v: Series(v).count(), None, "count"), - (lambda v: Series(v).max(), None, "max"), - (lambda v: Series(v).min(), None, "min"), - (lambda v: Series(v).sum(), None, "sum"), - (lambda v: Series(v).mean(), None, "mean"), - (lambda v: Series(v).std(), 1, "std"), - (lambda v: Series(v).cov(Series(v)), None, "cov"), - (lambda v: Series(v).corr(Series(v)), None, "corr"), - (lambda v: Series(v).var(), 1, "var"), - # restore once GH 8086 is fixed - # lambda v: Series(v).skew(), 3, 'skew'), - # (lambda v: Series(v).kurt(), 4, 'kurt'), - # restore once GH 8084 is fixed - # lambda v: Series(v).quantile(0.3), None, 'quantile'), - (lambda v: Series(v).median(), None, "median"), - (np.nanmax, 1, "max"), - (np.nanmin, 1, "min"), - (np.nansum, 1, "sum"), - (np.nanmean, 1, "mean"), - (lambda v: np.nanstd(v, ddof=1), 1, "std"), - (lambda v: np.nanvar(v, ddof=1), 1, "var"), - (np.nanmedian, 1, 
"median"), - ] - no_nan_functions = [ - (np.max, None, "max"), - (np.min, None, "min"), - (np.sum, None, "sum"), - (np.mean, None, "mean"), - (lambda v: np.std(v, ddof=1), 1, "std"), - (lambda v: np.var(v, ddof=1), 1, "var"), - (np.median, None, "median"), - ] - - def _create_data(self): - super()._create_data() - self.data = _consistency_data - +class TestRollingMomentsConsistency(ConsistencyBase): def setup_method(self, method): self._create_data() - def _test_moments_consistency( - self, - min_periods, - count, - mean, - mock_mean, - corr, - var_unbiased=None, - std_unbiased=None, - cov_unbiased=None, - var_biased=None, - std_biased=None, - cov_biased=None, - var_debiasing_factors=None, - ): - def _non_null_values(x): - values = x.values.ravel() - return set(values[notna(values)].tolist()) - - for (x, is_constant, no_nans) in self.data: - count_x = count(x) - mean_x = mean(x) - - if mock_mean: - # check that mean equals mock_mean - expected = mock_mean(x) - tm.assert_equal(mean_x, expected.astype("float64")) - - # check that correlation of a series with itself is either 1 or NaN - corr_x_x = corr(x, x) - - # assert _non_null_values(corr_x_x).issubset(set([1.])) - # restore once rolling_cov(x, x) is identically equal to var(x) - - if is_constant: - exp = x.max() if isinstance(x, Series) else x.max().max() - - # check mean of constant series - expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = exp - tm.assert_equal(mean_x, expected) - - # check correlation of constant series with itself is NaN - expected[:] = np.nan - tm.assert_equal(corr_x_x, expected) - - if var_unbiased and var_biased and var_debiasing_factors: - # check variance debiasing factors - var_unbiased_x = var_unbiased(x) - var_biased_x = var_biased(x) - var_debiasing_factors_x = var_debiasing_factors(x) - tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) - - for (std, var, cov) in [ - (std_biased, var_biased, cov_biased), - (std_unbiased, var_unbiased, 
cov_unbiased), - ]: - - # check that var(x), std(x), and cov(x) are all >= 0 - var_x = var(x) - std_x = std(x) - assert not (var_x < 0).any().any() - assert not (std_x < 0).any().any() - if cov: - cov_x_x = cov(x, x) - assert not (cov_x_x < 0).any().any() - - # check that var(x) == cov(x, x) - tm.assert_equal(var_x, cov_x_x) - - # check that var(x) == std(x)^2 - tm.assert_equal(var_x, std_x * std_x) - - if var is var_biased: - # check that biased var(x) == mean(x^2) - mean(x)^2 - mean_x2 = mean(x * x) - tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) - - if is_constant: - # check that variance of constant series is identically 0 - assert not (var_x > 0).any().any() - expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = 0.0 - if var is var_unbiased: - expected[count_x < 2] = np.nan - tm.assert_equal(var_x, expected) - - if isinstance(x, Series): - for (y, is_constant, no_nans) in self.data: - if not x.isna().equals(y.isna()): - # can only easily test two Series with similar - # structure - continue - - # check that cor(x, y) is symmetric - corr_x_y = corr(x, y) - corr_y_x = corr(y, x) - tm.assert_equal(corr_x_y, corr_y_x) - - if cov: - # check that cov(x, y) is symmetric - cov_x_y = cov(x, y) - cov_y_x = cov(y, x) - tm.assert_equal(cov_x_y, cov_y_x) - - # check that cov(x, y) == (var(x+y) - var(x) - - # var(y)) / 2 - var_x_plus_y = var(x + y) - var_y = var(y) - tm.assert_equal( - cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y) - ) - - # check that corr(x, y) == cov(x, y) / (std(x) * - # std(y)) - std_y = std(y) - tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) - - if cov is cov_biased: - # check that biased cov(x, y) == mean(x*y) - - # mean(x)*mean(y) - mean_y = mean(y) - mean_x_times_y = mean(x * y) - tm.assert_equal( - cov_x_y, mean_x_times_y - (mean_x * mean_y) - ) - - @pytest.mark.slow - @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) - @pytest.mark.parametrize("adjust", [True, False]) - @pytest.mark.parametrize("ignore_na", [True, 
False]) - def test_ewm_consistency(self, min_periods, adjust, ignore_na): - def _weights(s, com, adjust, ignore_na): - if isinstance(s, DataFrame): - if not len(s.columns): - return DataFrame(index=s.index, columns=s.columns) - w = concat( - [ - _weights( - s.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na - ) - for i, _ in enumerate(s.columns) - ], - axis=1, - ) - w.index = s.index - w.columns = s.columns - return w - - w = Series(np.nan, index=s.index) - alpha = 1.0 / (1.0 + com) - if ignore_na: - w[s.notna()] = _weights( - s[s.notna()], com=com, adjust=adjust, ignore_na=False - ) - elif adjust: - for i in range(len(s)): - if s.iat[i] == s.iat[i]: - w.iat[i] = pow(1.0 / (1.0 - alpha), i) - else: - sum_wts = 0.0 - prev_i = -1 - for i in range(len(s)): - if s.iat[i] == s.iat[i]: - if prev_i == -1: - w.iat[i] = 1.0 - else: - w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, i - prev_i) - sum_wts += w.iat[i] - prev_i = i - return w - - def _variance_debiasing_factors(s, com, adjust, ignore_na): - weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) - cum_sum = weights.cumsum().fillna(method="ffill") - cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill") - numerator = cum_sum * cum_sum - denominator = numerator - cum_sum_sq - denominator[denominator <= 0.0] = np.nan - return numerator / denominator - - def _ewma(s, com, min_periods, adjust, ignore_na): - weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) - result = ( - s.multiply(weights) - .cumsum() - .divide(weights.cumsum()) - .fillna(method="ffill") - ) - result[ - s.expanding().count() < (max(min_periods, 1) if min_periods else 1) - ] = np.nan - return result - - com = 3.0 - # test consistency between different ewm* moments - self._test_moments_consistency( - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).mean(), - mock_mean=lambda x: _ewma( - x, 
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ), - corr=lambda x, y: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).corr(y), - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - std_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=False) - ), - cov_unbiased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - std_biased=lambda x: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=True), - cov_biased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=True) - ), - var_debiasing_factors=lambda x: ( - _variance_debiasing_factors( - x, com=com, adjust=adjust, ignore_na=ignore_na - ) - ), - ) - - @pytest.mark.slow - @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) - def test_expanding_consistency(self, min_periods): - - # suppress warnings about empty slices, as we are deliberately testing - # with empty/0-length Series/DataFrames - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - message=".*(empty slice|0 for slice).*", - category=RuntimeWarning, - ) - - # test consistency between different expanding_* moments - self._test_moments_consistency( - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.expanding(min_periods=min_periods).mean(), - mock_mean=lambda x: x.expanding(min_periods=min_periods).sum() - / x.expanding().count(), - corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - std_unbiased=lambda x: 
x.expanding(min_periods=min_periods).std(), - cov_unbiased=lambda x, y: x.expanding(min_periods=min_periods).cov(y), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - std_biased=lambda x: x.expanding(min_periods=min_periods).std(ddof=0), - cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov( - y, ddof=0 - ), - var_debiasing_factors=lambda x: ( - x.expanding().count() - / (x.expanding().count() - 1.0).replace(0.0, np.nan) - ), - ) - - # test consistency between expanding_xyz() and either (a) - # expanding_apply of Series.xyz(), or (b) expanding_apply of - # np.nanxyz() - for (x, is_constant, no_nans) in self.data: - functions = self.base_functions - - # GH 8269 - if no_nans: - functions = self.base_functions + self.no_nan_functions - for (f, require_min_periods, name) in functions: - expanding_f = getattr(x.expanding(min_periods=min_periods), name) - - if ( - require_min_periods - and (min_periods is not None) - and (min_periods < require_min_periods) - ): - continue - - if name == "count": - expanding_f_result = expanding_f() - expanding_apply_f_result = x.expanding(min_periods=0).apply( - func=f, raw=True - ) - else: - if name in ["cov", "corr"]: - expanding_f_result = expanding_f(pairwise=False) - else: - expanding_f_result = expanding_f() - expanding_apply_f_result = x.expanding( - min_periods=min_periods - ).apply(func=f, raw=True) - - # GH 9422 - if name in ["sum", "prod"]: - tm.assert_equal(expanding_f_result, expanding_apply_f_result) - @pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) @@ -1835,16 +1088,6 @@ def test_rolling_corr_with_zero_variance(self, window): assert s.rolling(window=window).corr(other=other).isna().all() - def _check_pairwise_moment(self, dispatch, name, **kwargs): - def get_result(obj, obj2=None): - return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) - - result = get_result(self.frame) - result = result.loc[(slice(None), 1), 5] - 
result.index = result.index.droplevel(1) - expected = get_result(self.frame[1], self.frame[5]) - tm.assert_series_equal(result, expected, check_names=False) - def test_flex_binary_moment(self): # GH3155 # don't blow the stack @@ -1905,156 +1148,6 @@ def test_flex_binary_frame(self, method): ) tm.assert_frame_equal(res3, exp) - def test_ewmcov(self): - self._check_binary_ew("cov") - - def test_ewmcov_pairwise(self): - self._check_pairwise_moment("ewm", "cov", span=10, min_periods=5) - - def test_ewmcorr(self): - self._check_binary_ew("corr") - - def test_ewmcorr_pairwise(self): - self._check_pairwise_moment("ewm", "corr", span=10, min_periods=5) - - def _check_binary_ew(self, name): - def func(A, B, com, **kwargs): - return getattr(A.ewm(com, **kwargs), name)(B) - - A = Series(randn(50), index=np.arange(50)) - B = A[2:] + randn(48) - - A[:10] = np.NaN - B[-10:] = np.NaN - - result = func(A, B, 20, min_periods=5) - assert np.isnan(result.values[:14]).all() - assert not np.isnan(result.values[14:]).any() - - # GH 7898 - for min_periods in (0, 1, 2): - result = func(A, B, 20, min_periods=min_periods) - # binary functions (ewmcov, ewmcorr) with bias=False require at - # least two values - assert np.isnan(result.values[:11]).all() - assert not np.isnan(result.values[11:]).any() - - # check series of length 0 - empty = Series([], dtype=np.float64) - result = func(empty, empty, 50, min_periods=min_periods) - tm.assert_series_equal(result, empty) - - # check series of length 1 - result = func(Series([1.0]), Series([1.0]), 50, min_periods=min_periods) - tm.assert_series_equal(result, Series([np.NaN])) - - msg = "Input arrays must be of the same type!" 
- # exception raised is Exception - with pytest.raises(Exception, match=msg): - func(A, randn(50), 20, min_periods=5) - - def test_expanding_apply_args_kwargs(self, raw): - def mean_w_arg(x, const): - return np.mean(x) + const - - df = DataFrame(np.random.rand(20, 3)) - - expected = df.expanding().apply(np.mean, raw=raw) + 20.0 - - result = df.expanding().apply(mean_w_arg, raw=raw, args=(20,)) - tm.assert_frame_equal(result, expected) - - result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20}) - tm.assert_frame_equal(result, expected) - - def test_expanding_corr(self): - A = self.series.dropna() - B = (A + randn(len(A)))[:-5] - - result = A.expanding().corr(B) - - rolling_result = A.rolling(window=len(A), min_periods=1).corr(B) - - tm.assert_almost_equal(rolling_result, result) - - def test_expanding_count(self): - result = self.series.expanding().count() - tm.assert_almost_equal( - result, self.series.rolling(window=len(self.series)).count() - ) - - def test_expanding_quantile(self): - result = self.series.expanding().quantile(0.5) - - rolling_result = self.series.rolling( - window=len(self.series), min_periods=1 - ).quantile(0.5) - - tm.assert_almost_equal(result, rolling_result) - - def test_expanding_cov(self): - A = self.series - B = (A + randn(len(A)))[:-5] - - result = A.expanding().cov(B) - - rolling_result = A.rolling(window=len(A), min_periods=1).cov(B) - - tm.assert_almost_equal(rolling_result, result) - - def test_expanding_cov_pairwise(self): - result = self.frame.expanding().corr() - - rolling_result = self.frame.rolling( - window=len(self.frame), min_periods=1 - ).corr() - - tm.assert_frame_equal(result, rolling_result) - - def test_expanding_corr_pairwise(self): - result = self.frame.expanding().corr() - - rolling_result = self.frame.rolling( - window=len(self.frame), min_periods=1 - ).corr() - tm.assert_frame_equal(result, rolling_result) - - def test_expanding_cov_diff_index(self): - # GH 7512 - s1 = Series([1, 2, 3], index=[0, 1, 
2]) - s2 = Series([1, 3], index=[0, 2]) - result = s1.expanding().cov(s2) - expected = Series([None, None, 2.0]) - tm.assert_series_equal(result, expected) - - s2a = Series([1, None, 3], index=[0, 1, 2]) - result = s1.expanding().cov(s2a) - tm.assert_series_equal(result, expected) - - s1 = Series([7, 8, 10], index=[0, 1, 3]) - s2 = Series([7, 9, 10], index=[0, 2, 3]) - result = s1.expanding().cov(s2) - expected = Series([None, None, None, 4.5]) - tm.assert_series_equal(result, expected) - - def test_expanding_corr_diff_index(self): - # GH 7512 - s1 = Series([1, 2, 3], index=[0, 1, 2]) - s2 = Series([1, 3], index=[0, 2]) - result = s1.expanding().corr(s2) - expected = Series([None, None, 1.0]) - tm.assert_series_equal(result, expected) - - s2a = Series([1, None, 3], index=[0, 1, 2]) - result = s1.expanding().corr(s2a) - tm.assert_series_equal(result, expected) - - s1 = Series([7, 8, 10], index=[0, 1, 3]) - s2 = Series([7, 9, 10], index=[0, 2, 3]) - result = s1.expanding().corr(s2) - expected = Series([None, None, None, 1.0]) - tm.assert_series_equal(result, expected) - def test_rolling_cov_diff_length(self): # GH 7512 s1 = Series([1, 2, 3], index=[0, 1, 2]) @@ -2082,8 +1175,8 @@ def test_rolling_corr_diff_length(self): @pytest.mark.parametrize( "f", [ - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=False)), - lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=False)), + lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False), + lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False), lambda x: x.rolling(window=10, min_periods=5).max(), lambda x: x.rolling(window=10, min_periods=5).min(), lambda x: x.rolling(window=10, min_periods=5).sum(), @@ -2136,154 +1229,6 @@ def test_rolling_functions_window_non_shrinkage_binary(self): df_result = f(df) tm.assert_frame_equal(df_result, df_expected) - def test_moment_functions_zero_length(self): - # GH 8056 - s = Series(dtype=np.float64) - s_expected = s - df1 = 
DataFrame() - df1_expected = df1 - df2 = DataFrame(columns=["a"]) - df2["a"] = df2["a"].astype("float64") - df2_expected = df2 - - functions = [ - lambda x: x.expanding().count(), - lambda x: x.expanding(min_periods=5).cov(x, pairwise=False), - lambda x: x.expanding(min_periods=5).corr(x, pairwise=False), - lambda x: x.expanding(min_periods=5).max(), - lambda x: x.expanding(min_periods=5).min(), - lambda x: x.expanding(min_periods=5).sum(), - lambda x: x.expanding(min_periods=5).mean(), - lambda x: x.expanding(min_periods=5).std(), - lambda x: x.expanding(min_periods=5).var(), - lambda x: x.expanding(min_periods=5).skew(), - lambda x: x.expanding(min_periods=5).kurt(), - lambda x: x.expanding(min_periods=5).quantile(0.5), - lambda x: x.expanding(min_periods=5).median(), - lambda x: x.expanding(min_periods=5).apply(sum, raw=False), - lambda x: x.expanding(min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(window=10).count(), - lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False), - lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False), - lambda x: x.rolling(window=10, min_periods=5).max(), - lambda x: x.rolling(window=10, min_periods=5).min(), - lambda x: x.rolling(window=10, min_periods=5).sum(), - lambda x: x.rolling(window=10, min_periods=5).mean(), - lambda x: x.rolling(window=10, min_periods=5).std(), - lambda x: x.rolling(window=10, min_periods=5).var(), - lambda x: x.rolling(window=10, min_periods=5).skew(), - lambda x: x.rolling(window=10, min_periods=5).kurt(), - lambda x: x.rolling(window=10, min_periods=5).quantile(0.5), - lambda x: x.rolling(window=10, min_periods=5).median(), - lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), - lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), - ] - for f in functions: - try: - s_result = f(s) - tm.assert_series_equal(s_result, s_expected) - - df1_result = f(df1) - 
tm.assert_frame_equal(df1_result, df1_expected) - - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) - except (ImportError): - - # scipy needed for rolling_window - continue - - def test_moment_functions_zero_length_pairwise(self): - - df1 = DataFrame() - df1_expected = df1 - df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) - df2["a"] = df2["a"].astype("float64") - - df1_expected = DataFrame( - index=pd.MultiIndex.from_product([df1.index, df1.columns]), - columns=Index([]), - ) - df2_expected = DataFrame( - index=pd.MultiIndex.from_product( - [df2.index, df2.columns], names=["bar", "foo"] - ), - columns=Index(["a"], name="foo"), - dtype="float64", - ) - - functions = [ - lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)), - lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)), - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), - lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), - ] - for f in functions: - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) - - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) - - def test_expanding_cov_pairwise_diff_length(self): - # GH 7512 - df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo")) - df1a = DataFrame( - [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo") - ) - df2 = DataFrame( - [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo") - ) - df2a = DataFrame( - [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo") - ) - # TODO: xref gh-15826 - # .loc is not preserving the names - result1 = df1.expanding().cov(df2a, pairwise=True).loc[2] - result2 = df1.expanding().cov(df2a, pairwise=True).loc[2] - result3 = df1a.expanding().cov(df2, pairwise=True).loc[2] - result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2] - expected = DataFrame( - [[-3.0, -6.0], [-5.0, -10.0]], - columns=Index(["A", 
"B"], name="foo"), - index=Index(["X", "Y"], name="foo"), - ) - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected) - tm.assert_frame_equal(result4, expected) - - def test_expanding_corr_pairwise_diff_length(self): - # GH 7512 - df1 = DataFrame( - [[1, 2], [3, 2], [3, 4]], - columns=["A", "B"], - index=Index(range(3), name="bar"), - ) - df1a = DataFrame( - [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"] - ) - df2 = DataFrame( - [[5, 6], [None, None], [2, 1]], - columns=["X", "Y"], - index=Index(range(3), name="bar"), - ) - df2a = DataFrame( - [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"] - ) - result1 = df1.expanding().corr(df2, pairwise=True).loc[2] - result2 = df1.expanding().corr(df2a, pairwise=True).loc[2] - result3 = df1a.expanding().corr(df2, pairwise=True).loc[2] - result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2] - expected = DataFrame( - [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"]) - ) - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected) - tm.assert_frame_equal(result4, expected) - def test_rolling_skew_edge_cases(self): all_nan = Series([np.NaN] * 5) @@ -2334,83 +1279,6 @@ def test_rolling_kurt_eq_value_fperr(self): a = Series([1.1] * 15).rolling(window=10).kurt() assert np.isnan(a).all() - @pytest.mark.parametrize( - "func,static_comp", - [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], - ids=["sum", "mean", "max", "min"], - ) - def test_expanding_func(self, func, static_comp): - def expanding_func(x, min_periods=1, center=False, axis=0): - exp = x.expanding(min_periods=min_periods, center=center, axis=axis) - return getattr(exp, func)() - - self._check_expanding(expanding_func, static_comp, preserve_nan=False) - - def test_expanding_apply(self, raw): - def expanding_mean(x, min_periods=1): - - exp = 
x.expanding(min_periods=min_periods) - result = exp.apply(lambda x: x.mean(), raw=raw) - return result - - # TODO(jreback), needed to add preserve_nan=False - # here to make this pass - self._check_expanding(expanding_mean, np.mean, preserve_nan=False) - - ser = Series([], dtype=np.float64) - tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean(), raw=raw)) - - # GH 8080 - s = Series([None, None, None]) - result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw) - expected = Series([1.0, 2.0, 3.0]) - tm.assert_series_equal(result, expected) - - def _check_expanding( - self, - func, - static_comp, - has_min_periods=True, - has_time_rule=True, - preserve_nan=True, - ): - - series_result = func(self.series) - assert isinstance(series_result, Series) - frame_result = func(self.frame) - assert isinstance(frame_result, DataFrame) - - result = func(self.series) - tm.assert_almost_equal(result[10], static_comp(self.series[:11])) - - if preserve_nan: - assert result.iloc[self._nan_locs].isna().all() - - ser = Series(randn(50)) - - if has_min_periods: - result = func(ser, min_periods=30) - assert result[:29].isna().all() - tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - - # min_periods is working correctly - result = func(ser, min_periods=15) - assert isna(result.iloc[13]) - assert notna(result.iloc[14]) - - ser2 = Series(randn(20)) - result = func(ser2, min_periods=5) - assert isna(result[3]) - assert notna(result[4]) - - # min_periods=0 - result0 = func(ser, min_periods=0) - result1 = func(ser, min_periods=1) - tm.assert_almost_equal(result0, result1) - else: - result = func(ser) - tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - def test_rolling_max_gh6297(self): """Replicate result expected in GH #6297""" @@ -2532,3 +1400,76 @@ def test_rolling_min_max_numeric_types(self): assert result.dtypes[0] == np.dtype("f8") result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min() assert result.dtypes[0] == 
np.dtype("f8") + + def test_moment_functions_zero_length(self): + # GH 8056 + s = Series(dtype=np.float64) + s_expected = s + df1 = DataFrame() + df1_expected = df1 + df2 = DataFrame(columns=["a"]) + df2["a"] = df2["a"].astype("float64") + df2_expected = df2 + + functions = [ + lambda x: x.rolling(window=10).count(), + lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False), + lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False), + lambda x: x.rolling(window=10, min_periods=5).max(), + lambda x: x.rolling(window=10, min_periods=5).min(), + lambda x: x.rolling(window=10, min_periods=5).sum(), + lambda x: x.rolling(window=10, min_periods=5).mean(), + lambda x: x.rolling(window=10, min_periods=5).std(), + lambda x: x.rolling(window=10, min_periods=5).var(), + lambda x: x.rolling(window=10, min_periods=5).skew(), + lambda x: x.rolling(window=10, min_periods=5).kurt(), + lambda x: x.rolling(window=10, min_periods=5).quantile(0.5), + lambda x: x.rolling(window=10, min_periods=5).median(), + lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), + lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), + lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), + ] + for f in functions: + try: + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) + + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) + except (ImportError): + + # scipy needed for rolling_window + continue + + def test_moment_functions_zero_length_pairwise(self): + + df1 = DataFrame() + df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) + df2["a"] = df2["a"].astype("float64") + + df1_expected = DataFrame( + index=pd.MultiIndex.from_product([df1.index, df1.columns]), + columns=Index([]), + ) + df2_expected = DataFrame( + index=pd.MultiIndex.from_product( + [df2.index, df2.columns], names=["bar", "foo"] + ), 
+ columns=Index(["a"], name="foo"), + dtype="float64", + ) + + functions = [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ] + + for f in functions: + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected)
- [x] xref #30486 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30542
2019-12-29T17:51:33Z
2019-12-30T17:52:57Z
2019-12-30T17:52:57Z
2019-12-30T21:01:43Z
TYP: Implicit generic "Any" for builtins
diff --git a/pandas/_typing.py b/pandas/_typing.py index 69b08c581cff9..7b89486751f12 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -23,21 +23,29 @@ from pandas.core.indexes.base import Index # noqa: F401 from pandas.core.series import Series # noqa: F401 from pandas.core.generic import NDFrame # noqa: F401 + from pandas import Interval # noqa: F401 +# array-like AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray) ArrayLike = TypeVar("ArrayLike", "ExtensionArray", np.ndarray) + +# scalars + +PythonScalar = Union[str, int, float, bool] DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", "Period", "Timestamp", "Timedelta") +PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] +Scalar = Union[PythonScalar, PandasScalar] + +# other + Dtype = Union[str, np.dtype, "ExtensionDtype"] FilePathOrBuffer = Union[str, Path, IO[AnyStr]] - FrameOrSeries = TypeVar("FrameOrSeries", bound="NDFrame") -Scalar = Union[str, int, float, bool] Axis = Union[str, int] Ordered = Optional[bool] -JSONSerializable = Union[Scalar, List, Dict] - +JSONSerializable = Union[PythonScalar, List, Dict] Axes = Collection # to maintain type information across generic functions and parametrization -_T = TypeVar("_T") +T = TypeVar("T") diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 4d6be8221557d..53051baa8e67e 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1,6 +1,6 @@ import operator from shutil import get_terminal_size -from typing import Type, Union, cast +from typing import Dict, Hashable, List, Type, Union, cast from warnings import warn import numpy as np @@ -8,7 +8,7 @@ from pandas._config import get_option from pandas._libs import algos as libalgos, hashtable as htable -from pandas._typing import ArrayLike, Dtype, Ordered +from pandas._typing import ArrayLike, Dtype, Ordered, Scalar from pandas.compat.numpy import function as nv from 
pandas.util._decorators import ( Appender, @@ -511,7 +511,7 @@ def itemsize(self) -> int: """ return self.categories.itemsize - def tolist(self) -> list: + def tolist(self) -> List[Scalar]: """ Return a list of the values. @@ -2067,7 +2067,7 @@ def __setitem__(self, key, value): lindexer = self._maybe_coerce_indexer(lindexer) self._codes[key] = lindexer - def _reverse_indexer(self): + def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: """ Compute the inverse of a categorical, returning a dict of categories -> indexers. @@ -2097,8 +2097,8 @@ def _reverse_indexer(self): self.codes.astype("int64"), categories.size ) counts = counts.cumsum() - result = (r[start:end] for start, end in zip(counts, counts[1:])) - result = dict(zip(categories, result)) + _result = (r[start:end] for start, end in zip(counts, counts[1:])) + result = dict(zip(categories, _result)) return result # reduction ops # diff --git a/pandas/core/common.py b/pandas/core/common.py index 9017584171850..8a430a4aa7d11 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -9,11 +9,12 @@ from datetime import datetime, timedelta from functools import partial import inspect -from typing import Any, Iterable, Union +from typing import Any, Collection, Iterable, Union import numpy as np from pandas._libs import lib, tslibs +from pandas._typing import T from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -270,7 +271,7 @@ def maybe_make_list(obj): return obj -def maybe_iterable_to_list(obj: Union[Iterable, Any]) -> Union[list, Any]: +def maybe_iterable_to_list(obj: Union[Iterable[T], T]) -> Union[Collection[T], T]: """ If obj is Iterable but not list-like, consume into list. """ diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 2c224a1bef338..747a32ae816be 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -3,7 +3,7 @@ split-apply-combine paradigm. 
""" -from typing import Hashable, List, Optional, Tuple +from typing import Dict, Hashable, List, Optional, Tuple import numpy as np @@ -419,7 +419,7 @@ def _make_codes(self) -> None: self._group_index = uniques @cache_readonly - def groups(self) -> dict: + def groups(self) -> Dict[Hashable, np.ndarray]: return self.index.groupby(Categorical.from_codes(self.codes, self.group_index)) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a3808f6f4a37e..242c1971911b1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,7 +1,7 @@ from datetime import datetime import operator from textwrap import dedent -from typing import FrozenSet, Hashable, Optional, Union +from typing import Dict, FrozenSet, Hashable, Optional, Union import warnings import numpy as np @@ -4594,7 +4594,7 @@ def _maybe_promote(self, other): return self.astype("object"), other.astype("object") return self, other - def groupby(self, values): + def groupby(self, values) -> Dict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. @@ -4605,7 +4605,7 @@ def groupby(self, values): Returns ------- - groups : dict + dict {group name -> group labels} """ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b86293e78a80d..ebecb02e20e1a 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,4 +1,4 @@ -from typing import Tuple +from typing import Hashable, List, Tuple, Union import numpy as np @@ -2224,7 +2224,7 @@ def _convert_key(self, key, is_setter: bool = False): return key -def _tuplify(ndim: int, loc) -> tuple: +def _tuplify(ndim: int, loc: Hashable) -> Tuple[Union[Hashable, slice], ...]: """ Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. 
@@ -2238,9 +2238,10 @@ def _tuplify(ndim: int, loc) -> tuple: ------- tuple """ - tup = [slice(None, None) for _ in range(ndim)] - tup[0] = loc - return tuple(tup) + _tup: List[Union[Hashable, slice]] + _tup = [slice(None, None) for _ in range(ndim)] + _tup[0] = loc + return tuple(_tup) def convert_to_index_sliceable(obj, key): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index db8d9eb669c20..4f1541e8d127e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1459,7 +1459,7 @@ def copy( data = self.select(k) if isinstance(s, Table): - index: Union[bool, list] = False + index: Union[bool, List[str]] = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 3b01ae0c3c2e8..b2720f9158c6b 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1,7 +1,7 @@ from datetime import datetime from io import StringIO import re -from typing import Dict +from typing import Dict, List, Union import numpy as np import pytest @@ -12,12 +12,12 @@ @pytest.fixture -def mix_ab() -> Dict[str, list]: +def mix_ab() -> Dict[str, List[Union[int, str]]]: return {"a": list(range(4)), "b": list("ab..")} @pytest.fixture -def mix_abc() -> Dict[str, list]: +def mix_abc() -> Dict[str, List[Union[float, str]]]: return {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
xref #30539
https://api.github.com/repos/pandas-dev/pandas/pulls/30541
2019-12-29T16:31:27Z
2019-12-31T14:36:42Z
2019-12-31T14:36:42Z
2020-01-05T19:48:25Z
CI: Travis default version of python to 3.7
diff --git a/.travis.yml b/.travis.yml index 0c7740295b637..0a5af6d4348c0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ language: python -python: 3.5 +python: 3.7 # To turn off cached cython files and compiler cache # set NOCACHE-true
- [x] closes #30538 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30540
2019-12-29T15:29:42Z
2019-12-30T13:09:59Z
2019-12-30T13:09:59Z
2019-12-31T11:06:07Z
CLN: update check_untyped_defs setup.cfg
diff --git a/setup.cfg b/setup.cfg index 8fb602188dad5..ab02f4fce183f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -232,9 +232,6 @@ check_untyped_defs=False [mypy-pandas.core.indexes.multi] check_untyped_defs=False -[mypy-pandas.core.indexes.timedeltas] -check_untyped_defs=False - [mypy-pandas.core.indexing] check_untyped_defs=False
https://api.github.com/repos/pandas-dev/pandas/pulls/30535
2019-12-29T09:55:48Z
2019-12-30T13:13:23Z
2019-12-30T13:13:23Z
2019-12-30T13:57:25Z
CI: Web_and_Docs failing
diff --git a/environment.yml b/environment.yml index ab10d8b7e0b20..46fb5e7a19078 100644 --- a/environment.yml +++ b/environment.yml @@ -70,7 +70,7 @@ dependencies: - blosc - bottleneck>=1.2.1 - ipykernel - - ipython>=5.6.0 + - ipython>=5.6.0,<=7.10.1 # see gh-30527 - jinja2 # pandas.Styler - matplotlib>=2.2.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.6.8 diff --git a/requirements-dev.txt b/requirements-dev.txt index 4df0946ac0078..9f18bf767ae56 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -45,7 +45,7 @@ pip blosc bottleneck>=1.2.1 ipykernel -ipython>=5.6.0 +ipython>=5.6.0,<=7.10.1 jinja2 matplotlib>=2.2.2 numexpr>=2.6.8
- [ ] closes #30527 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30534
2019-12-29T09:50:26Z
2019-12-29T14:17:45Z
2019-12-29T14:17:45Z
2019-12-29T14:27:28Z