Dataset schema (one record per pull request; string length ranges):

column      dtype   min length  max length
title       string  1           185
diff        string  0           32.2M
body        string  0           123k
url         string  57          58
created_at  string  20          20
closed_at   string  20          20
merged_at   string  20          20
updated_at  string  20          20
adds non-regression test for GH27358
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a3817d3c226f5..7e6b707f01acf 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2397,6 +2397,13 @@ def test_from_records_len0_with_columns(self):
         assert result.index.name == "foo"
         tm.assert_index_equal(result.columns, expected)
 
+    def test_from_records_series_list_dict(self):
+        # GH27358
+        expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
+        data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
+        result = DataFrame.from_records(data)
+        tm.assert_frame_equal(result, expected)
+
     def test_to_frame_with_falsey_names(self):
         # GH 16114
         result = Series(name=0).to_frame().dtypes
- [X] closes #27358 - [X] tests added / passed - [ ] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27360
2019-07-12T15:48:33Z
2019-07-12T19:38:24Z
2019-07-12T19:38:24Z
2019-07-12T19:38:29Z
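A runnable sketch of the behavior this non-regression test pins down, with the data copied from the test itself; `pd.testing.assert_frame_equal` stands in for the internal `tm` helper, and this assumes a pandas version with the GH27358 fix in place:

```python
import pandas as pd

# Series of list-of-dict rows, as in the regression test for GH27358
data = pd.Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = pd.DataFrame.from_records(data)

# One column holding the dict from each row's single-element list
expected = pd.DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
pd.testing.assert_frame_equal(result, expected)
```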
TST: add test for multiindex partial indexing on both axes
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index 3c65f1b8abddb..b1519d82e1aa7 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -188,3 +188,14 @@ def test_setitem_multiple_partial(self, multiindex_dataframe_random_data):
         expected.loc["foo"] = 0
         expected.loc["bar"] = 0
         tm.assert_series_equal(result, expected)
+
+
+def test_loc_getitem_partial_both_axis():
+    # gh-12660
+    iterables = [["a", "b"], [2, 1]]
+    columns = MultiIndex.from_product(iterables, names=["col1", "col2"])
+    rows = MultiIndex.from_product(iterables, names=["row1", "row2"])
+    df = DataFrame(np.random.randn(4, 4), index=rows, columns=columns)
+    expected = df.iloc[:2, 2:].droplevel("row1").droplevel("col1", axis=1)
+    result = df.loc["a", "b"]
+    tm.assert_frame_equal(result, expected)
- [ ] closes #12660 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27359
2019-07-12T15:21:16Z
2019-07-12T16:01:03Z
2019-07-12T16:01:02Z
2019-07-12T16:12:45Z
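For illustration, the selection under test can be reproduced standalone. This is a minimal sketch built from the test body, using the public `pandas.testing` helper instead of the internal `tm` module:

```python
import numpy as np
import pandas as pd

iterables = [["a", "b"], [2, 1]]
rows = pd.MultiIndex.from_product(iterables, names=["row1", "row2"])
columns = pd.MultiIndex.from_product(iterables, names=["col1", "col2"])
df = pd.DataFrame(np.random.randn(4, 4), index=rows, columns=columns)

# Partial indexing on both axes at once: "a" matches level row1 on the
# index, "b" matches level col1 on the columns; both matched levels
# are dropped from the result.
result = df.loc["a", "b"]
expected = df.iloc[:2, 2:].droplevel("row1").droplevel("col1", axis=1)
pd.testing.assert_frame_equal(result, expected)
```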
add type annotations to io/formats/html.py
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index c4e3dd1c755cf..0e8ed7b25d665 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -400,7 +400,6 @@ def _get_adjustment():
 
 class TableFormatter:
 
-    is_truncated = False
     show_dimensions = None
 
     @property
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index e6aae44baa69b..c2f4ee2c4a68b 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -4,15 +4,20 @@
 
 from collections import OrderedDict
 from textwrap import dedent
+from typing import Dict, List, Optional, Tuple, Union
 
 from pandas._config import get_option
 
-from pandas.core.dtypes.generic import ABCMultiIndex
+from pandas.core.dtypes.generic import ABCIndex, ABCMultiIndex
 
 from pandas import option_context
 
 from pandas.io.common import _is_url
-from pandas.io.formats.format import TableFormatter, get_level_lengths
+from pandas.io.formats.format import (
+    DataFrameFormatter,
+    TableFormatter,
+    get_level_lengths,
+)
 from pandas.io.formats.printing import pprint_thing
 
 
@@ -28,13 +33,18 @@ class HTMLFormatter(TableFormatter):
 
     indent_delta = 2
 
-    def __init__(self, formatter, classes=None, border=None):
+    def __init__(
+        self,
+        formatter: DataFrameFormatter,
+        classes: Optional[Union[str, List, Tuple]] = None,
+        border: Optional[bool] = None,
+    ) -> None:
         self.fmt = formatter
         self.classes = classes
 
         self.frame = self.fmt.frame
         self.columns = self.fmt.tr_frame.columns
-        self.elements = []
+        self.elements = []  # type: List[str]
         self.bold_rows = self.fmt.kwds.get("bold_rows", False)
         self.escape = self.fmt.kwds.get("escape", True)
         self.show_dimensions = self.fmt.show_dimensions
@@ -47,15 +57,15 @@ def __init__(self, formatter, classes=None, border=None):
             self.fmt.col_space = "{colspace}px".format(colspace=self.fmt.col_space)
 
     @property
-    def show_row_idx_names(self):
+    def show_row_idx_names(self) -> bool:
         return self.fmt.show_row_idx_names
 
     @property
-    def show_col_idx_names(self):
+    def show_col_idx_names(self) -> bool:
         return self.fmt.show_col_idx_names
 
     @property
-    def row_levels(self):
+    def row_levels(self) -> int:
         if self.fmt.index:
             # showing (row) index
             return self.frame.index.nlevels
@@ -69,22 +79,24 @@ def row_levels(self):
             # not showing (row) index
             return 0
 
-    def _get_columns_formatted_values(self):
+    def _get_columns_formatted_values(self) -> ABCIndex:
         return self.columns
 
     @property
-    def is_truncated(self):
+    def is_truncated(self) -> bool:
        return self.fmt.is_truncated
 
     @property
-    def ncols(self):
+    def ncols(self) -> int:
         return len(self.fmt.tr_frame.columns)
 
-    def write(self, s, indent=0):
+    def write(self, s: str, indent: int = 0) -> None:
         rs = pprint_thing(s)
         self.elements.append(" " * indent + rs)
 
-    def write_th(self, s, header=False, indent=0, tags=None):
+    def write_th(
+        self, s: str, header: bool = False, indent: int = 0, tags: Optional[str] = None
+    ) -> None:
         """
         Method for writting a formatted <th> cell.
 
@@ -111,12 +123,14 @@ def write_th(self, s, header=False, indent=0, tags=None):
             tags = tags or ""
             tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space)
 
-        return self._write_cell(s, kind="th", indent=indent, tags=tags)
+        self._write_cell(s, kind="th", indent=indent, tags=tags)
 
-    def write_td(self, s, indent=0, tags=None):
-        return self._write_cell(s, kind="td", indent=indent, tags=tags)
+    def write_td(self, s: str, indent: int = 0, tags: Optional[str] = None) -> None:
+        self._write_cell(s, kind="td", indent=indent, tags=tags)
 
-    def _write_cell(self, s, kind="td", indent=0, tags=None):
+    def _write_cell(
+        self, s: str, kind: str = "td", indent: int = 0, tags: Optional[str] = None
+    ) -> None:
         if tags is not None:
             start_tag = "<{kind} {tags}>".format(kind=kind, tags=tags)
         else:
@@ -124,7 +138,9 @@ def _write_cell(self, s, kind="td", indent=0, tags=None):
 
         if self.escape:
             # escape & first to prevent double escaping of &
-            esc = OrderedDict([("&", r"&amp;"), ("<", r"&lt;"), (">", r"&gt;")])
+            esc = OrderedDict(
+                [("&", r"&amp;"), ("<", r"&lt;"), (">", r"&gt;")]
+            )  # type: Union[OrderedDict[str, str], Dict]
         else:
             esc = {}
 
@@ -146,14 +162,14 @@ def _write_cell(self, s, kind="td", indent=0, tags=None):
 
     def write_tr(
         self,
-        line,
-        indent=0,
-        indent_delta=0,
-        header=False,
-        align=None,
-        tags=None,
-        nindex_levels=0,
-    ):
+        line: List[str],
+        indent: int = 0,
+        indent_delta: int = 0,
+        header: bool = False,
+        align: Optional[str] = None,
+        tags: Optional[Dict[int, str]] = None,
+        nindex_levels: int = 0,
+    ) -> None:
         if tags is None:
             tags = {}
 
@@ -173,7 +189,7 @@ def write_tr(
         indent -= indent_delta
         self.write("</tr>", indent)
 
-    def render(self):
+    def render(self) -> List[str]:
         self._write_table()
 
         if self.should_show_dimensions:
@@ -186,7 +202,7 @@ def render(self):
 
         return self.elements
 
-    def _write_table(self, indent=0):
+    def _write_table(self, indent: int = 0) -> None:
         _classes = ["dataframe"]  # Default class.
         use_mathjax = get_option("display.html.use_mathjax")
         if not use_mathjax:
@@ -220,7 +236,7 @@ def _write_table(self, indent=0):
 
         self.write("</table>", indent)
 
-    def _write_col_header(self, indent):
+    def _write_col_header(self, indent: int) -> None:
         truncate_h = self.fmt.truncate_h
         if isinstance(self.columns, ABCMultiIndex):
             template = 'colspan="{span:d}" halign="left"'
@@ -337,14 +353,14 @@ def _write_col_header(self, indent):
 
         self.write_tr(row, indent, self.indent_delta, header=True, align=align)
 
-    def _write_row_header(self, indent):
+    def _write_row_header(self, indent: int) -> None:
         truncate_h = self.fmt.truncate_h
         row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
             self.ncols + (1 if truncate_h else 0)
         )
         self.write_tr(row, indent, self.indent_delta, header=True)
 
-    def _write_header(self, indent):
+    def _write_header(self, indent: int) -> None:
         self.write("<thead>", indent)
 
         if self.fmt.header:
@@ -355,12 +371,12 @@ def _write_header(self, indent):
 
         self.write("</thead>", indent)
 
-    def _get_formatted_values(self):
+    def _get_formatted_values(self) -> Dict[int, List[str]]:
         with option_context("display.max_colwidth", 999999):
             fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)}
         return fmt_values
 
-    def _write_body(self, indent):
+    def _write_body(self, indent: int) -> None:
         self.write("<tbody>", indent)
         fmt_values = self._get_formatted_values()
 
@@ -372,7 +388,9 @@ def _write_body(self, indent):
 
         self.write("</tbody>", indent)
 
-    def _write_regular_rows(self, fmt_values, indent):
+    def _write_regular_rows(
+        self, fmt_values: Dict[int, List[str]], indent: int
+    ) -> None:
         truncate_h = self.fmt.truncate_h
         truncate_v = self.fmt.truncate_v
 
@@ -385,7 +403,7 @@ def _write_regular_rows(self, fmt_values, indent):
         else:
             index_values = self.fmt.tr_frame.index.format()
 
-        row = []
+        row = []  # type: List[str]
         for i in range(nrows):
 
             if truncate_v and i == (self.fmt.tr_row_num):
@@ -416,7 +434,9 @@ def _write_regular_rows(self, fmt_values, indent):
                 row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
             )
 
-    def _write_hierarchical_rows(self, fmt_values, indent):
+    def _write_hierarchical_rows(
+        self, fmt_values: Dict[int, List[str]], indent: int
+    ) -> None:
         template = 'rowspan="{span}" valign="top"'
 
         truncate_h = self.fmt.truncate_h
@@ -546,13 +566,13 @@ class NotebookFormatter(HTMLFormatter):
     DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
     """
 
-    def _get_formatted_values(self):
+    def _get_formatted_values(self) -> Dict[int, List[str]]:
         return {i: self.fmt._format_col(i) for i in range(self.ncols)}
 
-    def _get_columns_formatted_values(self):
+    def _get_columns_formatted_values(self) -> List[str]:
         return self.columns.format()
 
-    def write_style(self):
+    def write_style(self) -> None:
         # We use the "scoped" attribute here so that the desired
         # style properties for the data frame are not then applied
         # throughout the entire notebook.
@@ -580,7 +600,7 @@ def write_style(self):
         template = dedent("\n".join((template_first, template_mid, template_last)))
         self.write(template)
 
-    def render(self):
+    def render(self) -> List[str]:
         self.write("<div>")
         self.write_style()
         super().render()
https://api.github.com/repos/pandas-dev/pandas/pulls/27355
2019-07-12T12:34:57Z
2019-07-12T14:12:49Z
2019-07-12T14:12:49Z
2019-07-12T14:53:40Z
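The annotations in this diff use the `# type:` comment form for instance attributes (e.g. `self.elements = []  # type: List[str]`) rather than PEP 526 variable annotations, since pandas at the time still supported Python 3.5, where the inline syntax does not parse. A minimal sketch of the same pattern on a toy class (`Writer` is illustrative, not pandas code):

```python
from typing import List, Optional


class Writer:
    def __init__(self, classes: Optional[List[str]] = None) -> None:
        self.classes = classes
        # Comment-style annotation: understood by mypy, legal on Python 3.5,
        # where `self.elements: List[str] = []` would be a syntax error.
        self.elements = []  # type: List[str]

    def write(self, s: str, indent: int = 0) -> None:
        self.elements.append(" " * indent + s)

    def render(self) -> List[str]:
        return self.elements
```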
TST/CLN: Add message checks to `pytest.raises(KeyError)` tests
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 77be952506964..1ef10ea5857d0 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -1,4 +1,5 @@
 from datetime import datetime
+import re
 
 import numpy as np
 import pytest
@@ -1120,9 +1121,10 @@ def test_raise_on_drop_duplicate_index(self, actual):
 
         # issue 19186
         level = 0 if isinstance(actual.index, MultiIndex) else None
-        with pytest.raises(KeyError):
+        msg = re.escape("\"['c'] not found in axis\"")
+        with pytest.raises(KeyError, match=msg):
             actual.drop("c", level=level, axis=0)
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=msg):
             actual.T.drop("c", level=level, axis=1)
         expected_no_err = actual.drop("c", axis=0, level=level, errors="ignore")
         assert_frame_equal(expected_no_err, actual)
diff --git a/pandas/tests/frame/test_duplicates.py b/pandas/tests/frame/test_duplicates.py
index 0ea24777ae1f5..d2a1fc43d2046 100644
--- a/pandas/tests/frame/test_duplicates.py
+++ b/pandas/tests/frame/test_duplicates.py
@@ -1,3 +1,5 @@
+import re
+
 import numpy as np
 import pytest
 
@@ -9,11 +11,12 @@
 def test_duplicated_with_misspelled_column_name(subset):
     # GH 19730
     df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
+    msg = re.escape("Index(['a'], dtype='object')")
 
-    with pytest.raises(KeyError):
+    with pytest.raises(KeyError, match=msg):
         df.duplicated(subset)
 
-    with pytest.raises(KeyError):
+    with pytest.raises(KeyError, match=msg):
         df.drop_duplicates(subset)
 
 
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index c2d38b2938fca..f0c4cf7545fba 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -1,4 +1,5 @@
 from datetime import date, datetime, time, timedelta
+import re
 from warnings import catch_warnings, simplefilter
 
 import numpy as np
@@ -59,7 +60,7 @@ def test_getitem(self, float_frame):
         ad = np.random.randn(len(df))
         df["@awesome_domain"] = ad
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
             df.__getitem__('df["$10"]')
 
         res = df["@awesome_domain"]
@@ -67,7 +68,8 @@
 
     def test_getitem_dupe_cols(self):
         df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
-        with pytest.raises(KeyError):
+        msg = "\"None of [Index(['baf'], dtype='object')] are in the [columns]\""
+        with pytest.raises(KeyError, match=re.escape(msg)):
             df[["baf"]]
 
     def test_get(self, float_frame):
@@ -446,14 +448,16 @@ def test_getitem_setitem_ix_negative_integers(self, float_frame):
         df = DataFrame(np.random.randn(8, 4))
 
         # ix does label-based indexing when having an integer index
+        msg = "\"None of [Int64Index([-1], dtype='int64')] are in the [index]\""
         with catch_warnings(record=True):
             simplefilter("ignore", FutureWarning)
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match=re.escape(msg)):
                 df.ix[[-1]]
 
+        msg = "\"None of [Int64Index([-1], dtype='int64')] are in the [columns]\""
         with catch_warnings(record=True):
             simplefilter("ignore", FutureWarning)
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match=re.escape(msg)):
                 df.ix[:, [-1]]
 
         # #1942
@@ -497,7 +501,11 @@ def test_setitem(self, float_frame):
         float_frame["col6"] = series
         tm.assert_series_equal(series, float_frame["col6"], check_names=False)
 
-        with pytest.raises(KeyError):
+        msg = (
+            r"\"None of \[Float64Index\(\[.*dtype='float64'\)\] are in the"
+            r" \[columns\]\""
+        )
+        with pytest.raises(KeyError, match=msg):
            float_frame[np.random.randn(len(float_frame) + 1)] = 1
 
         # set ndarray
@@ -1884,10 +1892,10 @@ def test_lookup_bool(self):
         assert df["mask"].dtype == np.bool_
 
     def test_lookup_raises(self, float_frame):
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'One or more row labels was not found'"):
             float_frame.lookup(["xyz"], ["A"])
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'One or more column labels was not found'"):
             float_frame.lookup([float_frame.index[0]], ["xyz"])
 
         with pytest.raises(ValueError, match="same size"):
@@ -2543,7 +2551,9 @@ def test_xs(self, float_frame, datetime_frame):
         assert xs["A"] == 1
         assert xs["B"] == "1"
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
+        ):
             datetime_frame.xs(datetime_frame.index[0] - BDay())
 
         # xs get column
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index ed9eeb594f7f6..7ad5abca82b29 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -1,3 +1,5 @@
+import re
+
 import numpy as np
 import pytest
 
@@ -88,9 +90,9 @@ def test_assign_dependent_old_python(self):
         df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
 
         # Key C does not exist at definition time of df
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^'C'$"):
             df.assign(C=lambda df: df.A, D=lambda df: df["A"] + df["C"])
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^'C'$"):
             df.assign(C=df.A, D=lambda x: x["A"] + x["C"])
 
     @pytest.mark.skipif(
@@ -219,14 +221,14 @@ def test_delitem_multiindex(self):
         # A still in the levels, BUT get a KeyError if trying
         # to delete
         assert ("A",) not in df.columns
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("('A',)")):
             del df[("A",)]
 
         # behavior of dropped/deleted MultiIndex levels changed from
         # GH 2770 to GH 19027: MultiIndex no longer '.__contains__'
         # levels which are dropped/deleted
         assert "A" not in df.columns
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("('A',)")):
             del df["A"]
 
     def test_pop(self, float_frame):
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index b2b38980d0ceb..7b9e50ebbf342 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -618,7 +618,9 @@ def test_sample(sel):
             df.sample(n=1, weights="weight_column", axis=1)
 
         # Check weighting key error
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match="'String passed to weights not a valid column'"
+        ):
             df.sample(n=3, weights="not_a_real_column_name")
 
         # Check that re-normalizes weights that don't sum to one.
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index d201b887739ec..e1e35d8eb7d18 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -206,7 +206,7 @@ def test_timegrouper_with_reg_groups(self):
             result = df.groupby([pd.Grouper(freq="1M", key="Date"), "Buyer"]).sum()
             assert_frame_equal(result, expected)
 
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'The grouper name foo is not found'"):
                 df.groupby([pd.Grouper(freq="1M", key="foo"), "Buyer"]).sum()
 
         # passing the level
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 2a5bbdbb131ed..cd5efc86320c2 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -614,7 +614,7 @@ def test_get_loc(self):
         )
         with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
             idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'2000-01-01T03'"):
             idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
         with pytest.raises(
             ValueError, match="tolerance size must match target index size"
@@ -634,12 +634,12 @@ def test_get_loc(self):
         assert idx.get_loc("1999", method="nearest") == 0
         assert idx.get_loc("2001", method="nearest") == 2
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'1999'"):
             idx.get_loc("1999", method="pad")
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'2001'"):
             idx.get_loc("2001", method="backfill")
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'foobar'"):
             idx.get_loc("foobar")
         with pytest.raises(TypeError):
             idx.get_loc(slice(2))
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 962ed2b1cf8ed..c61af1ce70aed 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -445,7 +445,7 @@ def test_get_loc_length_one_scalar(self, scalar, closed):
             result = index.get_loc(scalar)
             assert result == 0
         else:
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match=str(scalar)):
                 index.get_loc(scalar)
 
     @pytest.mark.parametrize("other_closed", ["left", "right", "both", "neither"])
@@ -458,7 +458,14 @@ def test_get_loc_length_one_interval(self, left, right, closed, other_closed):
             result = index.get_loc(interval)
             assert result == 0
         else:
-            with pytest.raises(KeyError):
+            with pytest.raises(
+                KeyError,
+                match=re.escape(
+                    "Interval({left}, {right}, closed='{other_closed}')".format(
+                        left=left, right=right, other_closed=other_closed
+                    )
+                ),
+            ):
                 index.get_loc(interval)
 
     # Make consistent with test_interval_new.py (see #16316, #16386)
diff --git a/pandas/tests/indexes/interval/test_interval_new.py b/pandas/tests/indexes/interval/test_interval_new.py
index ab9f7ef1c3e26..d92559d2e3e49 100644
--- a/pandas/tests/indexes/interval/test_interval_new.py
+++ b/pandas/tests/indexes/interval/test_interval_new.py
@@ -1,3 +1,5 @@
+import re
+
 import numpy as np
 import pytest
 
@@ -15,16 +17,21 @@ def test_get_loc_interval(self, closed, side):
         for bound in [[0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [2.5, 3], [-1, 4]]:
             # if get_loc is supplied an interval, it should only search
             # for exact matches, not overlaps or covers, else KeyError.
+            msg = re.escape(
+                "Interval({bound[0]}, {bound[1]}, closed='{side}')".format(
+                    bound=bound, side=side
+                )
+            )
             if closed == side:
                 if bound == [0, 1]:
                     assert idx.get_loc(Interval(0, 1, closed=side)) == 0
                 elif bound == [2, 3]:
                     assert idx.get_loc(Interval(2, 3, closed=side)) == 1
                 else:
-                    with pytest.raises(KeyError):
+                    with pytest.raises(KeyError, match=msg):
                         idx.get_loc(Interval(*bound, closed=side))
             else:
-                with pytest.raises(KeyError):
+                with pytest.raises(KeyError, match=msg):
                     idx.get_loc(Interval(*bound, closed=side))
 
     @pytest.mark.parametrize("scalar", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5])
@@ -81,18 +88,42 @@ def test_slice_locs_with_interval(self):
 
         # unsorted duplicates
         index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)])
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=re.escape(
+                '"Cannot get left slice bound for non-unique label:'
+                " Interval(0, 2, closed='right')\""
+            ),
+        ):
             index.slice_locs(start=Interval(0, 2), end=Interval(2, 4))
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=re.escape(
+                '"Cannot get left slice bound for non-unique label:'
+                " Interval(0, 2, closed='right')\""
+            ),
+        ):
             index.slice_locs(start=Interval(0, 2))
 
         assert index.slice_locs(end=Interval(2, 4)) == (0, 2)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=re.escape(
+                '"Cannot get right slice bound for non-unique label:'
+                " Interval(0, 2, closed='right')\""
+            ),
+        ):
             index.slice_locs(end=Interval(0, 2))
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=re.escape(
+                '"Cannot get right slice bound for non-unique label:'
+                " Interval(0, 2, closed='right')\""
+            ),
+        ):
             index.slice_locs(start=Interval(2, 4), end=Interval(0, 2))
 
         # another unsorted duplicates
@@ -139,7 +170,13 @@ def test_slice_locs_with_ints_and_floats_succeeds(self):
     def test_slice_locs_with_ints_and_floats_errors(self, tuples, query):
         start, stop = query
         index = IntervalIndex.from_tuples(tuples)
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=(
+                "'can only get slices from an IntervalIndex if bounds are"
+                " non-overlapping and all monotonic increasing or decreasing'"
+            ),
+        ):
             index.slice_locs(start, stop)
 
     @pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index b7104242b5ccc..87f9eaa209277 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -62,7 +62,7 @@ def test_get_loc(self, tree):
         expected = np.array([0, 1], dtype="intp")
         tm.assert_numpy_array_equal(result, expected)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="-1"):
             tree.get_loc(-1)
 
     def test_get_indexer(self, tree):
@@ -70,7 +70,9 @@ def test_get_indexer(self, tree):
         expected = np.array([0, 4, -1], dtype="intp")
         tm.assert_numpy_array_equal(result, expected)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match="'indexer does not intersect a unique set of intervals'"
+        ):
             tree.get_indexer(np.array([3.0]))
 
     def test_get_indexer_non_unique(self, tree):
@@ -100,7 +102,9 @@ def test_duplicates(self, dtype):
         expected = np.array([0, 1, 2], dtype="intp")
         tm.assert_numpy_array_equal(result, expected)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match="'indexer does not intersect a unique set of intervals'"
+        ):
             tree.get_indexer(np.array([0.5]))
 
         indexer, missing = tree.get_indexer_non_unique(np.array([0.5]))
@@ -116,7 +120,7 @@ def test_get_loc_closed(self, closed):
         tree = IntervalTree([0], [1], closed=closed)
         for p, errors in [(0, tree.open_left), (1, tree.open_right)]:
             if errors:
-                with pytest.raises(KeyError):
+                with pytest.raises(KeyError, match=str(p)):
                     tree.get_loc(p)
             else:
                 result = tree.get_loc(p)
diff --git a/pandas/tests/indexes/multi/test_partial_indexing.py b/pandas/tests/indexes/multi/test_partial_indexing.py
index d6799e86683a9..5db1296d828ca 100644
--- a/pandas/tests/indexes/multi/test_partial_indexing.py
+++ b/pandas/tests/indexes/multi/test_partial_indexing.py
@@ -54,7 +54,7 @@ def test_partial_string_timestamp_multiindex():
     # ambiguous and we don't want to extend this behavior forward to work
     # in multi-indexes. This would amount to selecting a scalar from a
    # column.
-    with pytest.raises(KeyError):
+    with pytest.raises(KeyError, match="'2016-01-01'"):
         df["2016-01-01"]
 
     # partial string match on year only
@@ -83,7 +83,7 @@ def test_partial_string_timestamp_multiindex():
     tm.assert_frame_equal(result, expected)
 
     # Slicing date on first level should break (of course)
-    with pytest.raises(KeyError):
+    with pytest.raises(KeyError, match="'2016-01-01'"):
         df_swap.loc["2016-01-01"]
 
     # GH12685 (partial string with daily resolution or below)
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index c62bc80cfb53f..3dee1dbecf3ba 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -115,7 +115,7 @@ def test_unsortedindex():
     df.sort_index(inplace=True)
     assert len(df.loc(axis=0)["z", :]) == 2
 
-    with pytest.raises(KeyError):
+    with pytest.raises(KeyError, match="'q'"):
         df.loc(axis=0)["q", :]
 
 
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 3f66891caddc3..cf03e2c7847f0 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -200,7 +200,7 @@ def test_getitem_day(self):
 
         invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
         for v in invalid:
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match=v):
                 s[v]
 
 
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 2b9632acd83ca..e79991f652154 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -697,7 +697,7 @@ def test_get_loc(self):
         assert cidx1.get_loc("e") == idx1.get_loc("e")
 
         for i in [cidx1, idx1]:
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'NOT-EXIST'"):
                 i.get_loc("NOT-EXIST")
 
         # non-unique
@@ -716,7 +716,7 @@ def test_get_loc(self):
         assert res == 4
 
         for i in [cidx2, idx2]:
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'NOT-EXIST'"):
                 i.get_loc("NOT-EXIST")
 
         # non-unique, sliceable
@@ -733,7 +733,7 @@ def test_get_loc(self):
         assert res == slice(2, 5, None)
 
         for i in [cidx3, idx3]:
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'c'"):
                 i.get_loc("c")
 
     def test_repr_roundtrip(self):
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 0400b7810ecc9..605df9971a567 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -34,7 +34,9 @@ def test_droplevel(self, indices):
                 indices.droplevel(level)
 
         for level in "wrong", ["wrong"]:
-            with pytest.raises(KeyError):
+            with pytest.raises(
+                KeyError, match=re.escape("'Level wrong must be same as name (None)'")
+            ):
                 indices.droplevel(level)
 
     def test_constructor_non_hashable_name(self, indices):
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 213d9c6505229..58b98297f00f3 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -311,7 +311,7 @@ def test_cached_data(self):
         df.loc[50]
         assert idx._cached_data is None
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="51"):
             df.loc[51]
         assert idx._cached_data is None
 
@@ -1027,13 +1027,13 @@ def test_engineless_lookup(self):
         tm.assert_numpy_array_equal(
             idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
         )
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="3"):
             idx.get_loc(3)
 
         assert "_engine" not in idx._cache
 
         # The engine is still required for lookup of a different dtype scalar:
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'a'"):
             assert idx.get_loc("a") == -1
 
         assert "_engine" in idx._cache
diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py
index 1bdb665101d41..7ae42782774db 100644
--- a/pandas/tests/indexing/interval/test_interval.py
+++ b/pandas/tests/indexing/interval/test_interval.py
@@ -41,9 +41,9 @@ def test_nonoverlapping_monotonic(self, direction, closed):
                 assert s[key] == expected
                 assert s.loc[key] == expected
             else:
-                with pytest.raises(KeyError):
+                with pytest.raises(KeyError, match=str(key)):
                     s[key]
-                with pytest.raises(KeyError):
+                with pytest.raises(KeyError, match=str(key)):
                     s.loc[key]
 
         for key, expected in zip(idx.right, s):
@@ -51,9 +51,9 @@ def test_nonoverlapping_monotonic(self, direction, closed):
                 assert s[key] == expected
                 assert s.loc[key] == expected
             else:
-                with pytest.raises(KeyError):
+                with pytest.raises(KeyError, match=str(key)):
                     s[key]
-                with pytest.raises(KeyError):
+                with pytest.raises(KeyError, match=str(key)):
                     s.loc[key]
 
         for key, expected in zip(idx.mid, s):
@@ -65,10 +65,10 @@ def test_non_matching(self):
 
         # this is a departure from our current
         # indexin scheme, but simpler
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^$"):
             s.loc[[-1, 3, 4, 5]]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^$"):
             s.loc[[-1, 3]]
 
     def test_large_series(self):
@@ -93,7 +93,7 @@ def test_loc_getitem_frame(self):
         expected = df.iloc[4:6]
         tm.assert_frame_equal(result, expected)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="10"):
             df.loc[10]
 
         # single list-like
@@ -106,9 +106,9 @@ def test_loc_getitem_frame(self):
         expected = df.take([4, 5, 4, 5])
         tm.assert_frame_equal(result, expected)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^$"):
             df.loc[[10]]
 
         # partial missing
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^$"):
             df.loc[[10, 4]]
diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py
index 92c71bbc6eb32..a86a9d16d3f9f 100644
--- a/pandas/tests/indexing/interval/test_interval_new.py
+++ b/pandas/tests/indexing/interval/test_interval_new.py
@@ -1,3 +1,5 @@
+import re
+
 import numpy as np
 import pytest
 
@@ -30,31 +32,35 @@ def test_loc_with_interval(self):
         tm.assert_series_equal(expected, result)
 
         # missing or not exact
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")):
             s.loc[Interval(3, 5, closed="left")]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")):
             s[Interval(3, 5, closed="left")]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
             s[Interval(3, 5)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
             s.loc[Interval(3, 5)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
             s[Interval(3, 5)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match=re.escape("Interval(-2, 0, closed='right')")
+        ):
             s.loc[Interval(-2, 0)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match=re.escape("Interval(-2, 0, closed='right')")
+        ):
             s[Interval(-2, 0)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")):
             s.loc[Interval(5, 6)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")):
             s[Interval(5, 6)]
 
     def test_loc_with_scalar(self):
@@ -175,16 +181,16 @@ def test_loc_with_overlap(self):
         result = s[[Interval(1, 5), Interval(3, 7)]]
         tm.assert_series_equal(expected, result)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
             s.loc[Interval(3, 5)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^$"):
             s.loc[[Interval(3, 5)]]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
             s[Interval(3, 5)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^$"):
             s[[Interval(3, 5)]]
 
         # slices with interval (only exact matches)
@@ -195,15 +201,17 @@ def test_loc_with_overlap(self):
         result = s[Interval(1, 5) : Interval(3, 7)]
         tm.assert_series_equal(expected, result)
 
-        with pytest.raises(KeyError):
+        msg = "'can only get slices from an IntervalIndex if bounds are"
+        " non-overlapping and all monotonic increasing or decreasing'"
+        with pytest.raises(KeyError, match=msg):
             s.loc[Interval(1, 6) : Interval(3, 8)]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=msg):
             s[Interval(1, 6) : Interval(3, 8)]
 
         # slices with scalar raise for overlapping intervals
         # TODO KeyError is the appropriate error?
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=msg):
             s.loc[1:4]
 
     def test_non_unique(self):
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 0dccf023c66f8..c365c985eb4b6 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -355,7 +355,11 @@ def test_loc_listlike(self):
         assert_frame_equal(result, expected, check_index_type=True)
 
         # not all labels in the categories
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match="'a list-indexer must only include values that are in the"
+            " categories'",
+        ):
             self.df2.loc[["a", "d"]]
 
     def test_loc_listlike_dtypes(self):
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index ba144909724cf..d604758565b86 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1,6 +1,7 @@
 """ test fancy indexing & misc """
 
 from datetime import datetime
+import re
 from warnings import catch_warnings, simplefilter
 import weakref
 
@@ -336,7 +337,12 @@ def test_dups_fancy_indexing(self):
 
         # List containing only missing label
         dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=re.escape(
+                "\"None of [Index(['E'], dtype='object')] are in the [index]\""
+            ),
+        ):
             dfnu.loc[["E"]]
 
         # ToDo: check_index_type can be True after GH 11497
@@ -425,7 +431,7 @@ def test_multitype_list_index_access(self):
         # GH 10610
         df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match=re.escape("'[-8, 26] not in index'")):
             df[[22, 26, -8]]
         assert df[21].shape[0] == df.shape[0]
 
@@ -641,18 +647,18 @@ def test_string_slice(self):
         # dtype should properly raises KeyError
         df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
         assert df.index.is_all_dates
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'2011'"):
             df["2011"]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'2011'"):
             df.loc["2011", 0]
 
         df = DataFrame()
         assert not df.index.is_all_dates
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'2011'"):
             df["2011"]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="'2011'"):
             df.loc["2011", 0]
 
     def test_astype_assignment(self):
@@ -855,9 +861,9 @@ def test_mixed_index_assignment(self):
     def test_mixed_index_no_fallback(self):
         # GH 19860
         s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^0$"):
             s.at[0]
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^4$"):
             s.at[4]
 
     def test_rhs_alignment(self):
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index d749e697c8282..9afa141b365e4 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1,5 +1,6 @@
 """ test label based indexing with loc """
 from io import StringIO
+import re
 from warnings import catch_warnings, filterwarnings
 
 import numpy as np
@@ -425,7 +426,12 @@ def test_loc_getitem_list_with_fail(self):
 
         s.loc[[2]]
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError,
+            match=re.escape(
+                "\"None of [Int64Index([3], dtype='int64')] are in the [index]\""
+            ),
+        ):
             s.loc[[3]]
 
         # a non-match and a match
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index e6ccee684b76b..3eb16e0a64b6d 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -203,9 +203,9 @@ def test_mixed_index_at_iat_loc_iloc_series(self):
         for i in range(len(s)):
             assert s.iat[i] == s.iloc[i] == i + 1
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^4$"):
             s.at[4]
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^4$"):
             s.loc[4]
 
     def test_mixed_index_at_iat_loc_iloc_dataframe(self):
@@ -221,9 +221,9 @@ def test_mixed_index_at_iat_loc_iloc_dataframe(self):
         for i in range(5):
             assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
 
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^3$"):
             df.at[0, 3]
-        with pytest.raises(KeyError):
+        with pytest.raises(KeyError, match="^3$"):
             df.loc[0, 3]
 
     def test_iat_setter_incompatible_assignment(self):
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 0908ed885a6ca..8ad09549f3cbe 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1024,7 +1024,9 @@ def test_invalid_columns(self, engine, ext):
             read_frame = pd.read_excel(self.path, "test1", index_col=0)
             tm.assert_frame_equal(expected, read_frame)
 
-        with pytest.raises(KeyError):
+        with pytest.raises(
+            KeyError, match="'passes columns are not ALL present dataframe'"
+        ):
             write_frame.to_excel(self.path, "test1", columns=["C", "D"])
 
     def test_comment_arg(self, engine, ext):
diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py
index 946334b5df05e..d67f2c3b7bd66 100644
--- a/pandas/tests/io/pytables/test_pytables.py
+++ b/pandas/tests/io/pytables/test_pytables.py
@@ -4,6 +4,7 @@
 from distutils.version import LooseVersion
 from io import BytesIO
 import os
+import re
 import tempfile
 from warnings import catch_warnings, simplefilter
 
@@ -648,7 +649,7 @@ def test_get(self):
             right = store["/a"]
             tm.assert_series_equal(left, right)
 
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'No object named b in the file'"):
                 store.get("b")
 
     @pytest.mark.parametrize(
@@ -1300,7 +1301,7 @@ def test_read_missing_key_close_store(self):
             df = pd.DataFrame({"a": range(2), "b": range(2)})
             df.to_hdf(path, "k1")
 
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'No object named k2 in the file'"):
                 pd.read_hdf(path, "k2")
 
             # smoke test to test that file is properly closed after
@@ -1953,7 +1954,7 @@ def check(obj, comparator):
             # 0 len
             df_empty = DataFrame(columns=list("ABC"))
             store.append("df", df_empty)
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'No object named df in the file'"):
                 store.select("df")
 
             # repeated append of 0/non-zero frames
@@ -2237,7 +2238,9 @@ def test_remove(self):
             assert len(store) == 0
 
             # nonexistence
-            with pytest.raises(KeyError):
+            with pytest.raises(
+                KeyError, match="'No object named a_nonexistent_store in the file'"
+            ):
                 store.remove("a_nonexistent_store")
 
             # pathing
@@ -3530,7 +3533,9 @@ def test_read_column(self):
             store.append("df", df)
 
             # error
-            with pytest.raises(KeyError):
+            with pytest.raises(
+                KeyError, match=re.escape("'column [foo] not found in the table'")
+            ):
                 store.select_column("df", "foo")
 
            with pytest.raises(Exception):
@@ -3780,15 +3785,16 @@ def test_select_as_multiple(self):
             with pytest.raises(Exception):
                 store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
 
-            with pytest.raises(KeyError):
+            msg = "'No object named df3 in the file'"
+            with pytest.raises(KeyError, match=msg):
                 store.select_as_multiple(
                     ["df1", "df3"], where=["A>0", "B>0"], selector="df1"
                 )
 
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match=msg):
                 store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
 
-            with pytest.raises(KeyError):
+            with pytest.raises(KeyError, match="'No object named df4 in the file'"):
                 store.select_as_multiple(
                     ["df1", "df2"], where=["A>0", "B>0"], selector="df4"
                 )
@@ -4502,7 +4508,9 @@ def test_categorical(self):
             assert result is not None
             store.remove("df3")
 
-            with pytest.raises(KeyError):
+            with pytest.raises(
+                KeyError, match="'No object named df3/meta/s/meta in the file'"
+            ):
                 store.select("df3/meta/s/meta")
 
     def test_categorical_conversion(self):
xref #23922; only KeyError in this PR, as a precursor to #25996
https://api.github.com/repos/pandas-dev/pandas/pulls/27354
2019-07-12T10:53:21Z
2019-07-12T14:14:35Z
2019-07-12T14:14:35Z
2019-07-12T14:53:58Z
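The pattern applied throughout the diff: `pytest.raises(..., match=...)` treats `match` as a regular expression searched against `str(excinfo.value)`, so literal messages containing quotes or brackets are wrapped in `re.escape`. A minimal self-contained sketch (the dict lookup is illustrative, not from the PR):

```python
import re

import pytest


def test_keyerror_message_is_checked():
    d = {}
    # str() of a KeyError wraps the missing key in quotes, and `match`
    # is a regex, so the literal message is escaped before matching.
    msg = re.escape("'missing-key'")
    with pytest.raises(KeyError, match=msg):
        d["missing-key"]
```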
Dispatch Index ops to Series
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index abe20ee0a91ce..e084f99ec5a2c 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -70,7 +70,6 @@
 from pandas.core.indexes.frozen import FrozenList
 import pandas.core.missing as missing
 from pandas.core.ops import get_op_result_name, make_invalid_op
-from pandas.core.ops.missing import dispatch_missing
 import pandas.core.sorting as sorting
 from pandas.core.strings import StringMethods
 
@@ -144,27 +143,18 @@ def index_arithmetic_method(self, other):
             out = op(self, other)
             return Index(out, name=self.name)
 
-        other = self._validate_for_numeric_binop(other, op)
-
         # handle time-based others
         if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
             return self._evaluate_with_timedelta_like(other, op)
-        elif isinstance(other, (datetime, np.datetime64)):
-            return self._evaluate_with_datetime_like(other, op)
 
-        values = self.values
-        with np.errstate(all="ignore"):
-            result = op(values, other)
+        other = self._validate_for_numeric_binop(other, op)
 
-        result = dispatch_missing(op, values, other, result)
+        from pandas import Series
 
-        attrs = self._get_attributes_dict()
-        attrs = self._maybe_update_attributes(attrs)
-        if op is divmod:
-            result = (Index(result[0], **attrs), Index(result[1], **attrs))
-        else:
-            result = Index(result, **attrs)
-        return result
+        result = op(Series(self), other)
+        if isinstance(result, tuple):
+            return (Index(result[0]), Index(result[1]))
+        return Index(result)
 
     name = "__{name}__".format(name=op.__name__)
     # TODO: docstring?
@@ -2361,10 +2351,14 @@ def _get_unique_index(self, dropna=False):
     def __add__(self, other):
         if isinstance(other, (ABCSeries, ABCDataFrame)):
             return NotImplemented
-        return Index(np.array(self) + other)
+        from pandas import Series
+
+        return Index(Series(self) + other)
 
     def __radd__(self, other):
-        return Index(other + np.array(self))
+        from pandas import Series
+
+        return Index(other + Series(self))
 
     def __iadd__(self, other):
         # alias for __add__
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index ee5c670364485..43fe8f1a8698f 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -38,6 +38,7 @@
 )
 from pandas.core.dtypes.generic import (
     ABCDataFrame,
+    ABCDatetimeArray,
     ABCIndex,
     ABCIndexClass,
     ABCSeries,
@@ -1702,10 +1703,14 @@ def wrapper(left, right):
             # does inference in the case where `result` has object-dtype.
             return construct_result(left, result, index=left.index, name=res_name)
 
+        elif isinstance(right, (ABCDatetimeArray, pd.DatetimeIndex)):
+            result = op(left._values, right)
+            return construct_result(left, result, index=left.index, name=res_name)
+
         lvalues = left.values
         rvalues = right
-        if isinstance(rvalues, ABCSeries):
-            rvalues = rvalues.values
+        if isinstance(rvalues, (ABCSeries, ABCIndexClass)):
+            rvalues = rvalues._values
 
         with np.errstate(all="ignore"):
             result = na_op(lvalues, rvalues)
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 3698958261555..01bc345a40b83 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -148,40 +148,26 @@ def mask_zero_div_zero(x, y, result):
     return result
 
 
-def dispatch_missing(op, left, right, result):
+def dispatch_fill_zeros(op, left, right, result):
     """
-    Fill nulls caused by division by zero, casting to a different dtype
-    if necessary.
+    Call fill_zeros with the appropriate fill value depending on the operation,
+    with special logic for divmod and rdivmod.
 
     Parameters
     ----------
     op : function (operator.add, operator.div, ...)
-    left : object (Index for non-reversed ops)
-    right : object (Index fof reversed ops)
+    left : object (np.ndarray for non-reversed ops)
+    right : object (np.ndarray for reversed ops)
     result : ndarray
 
     Returns
     -------
-    result : ndarray
-    """
-    if op is operator.floordiv:
-        # Note: no need to do this for truediv; in py3 numpy behaves the way
-        # we want.
-        result = mask_zero_div_zero(left, right, result)
-    elif op is operator.mod:
-        result = fill_zeros(result, left, right, "__mod__", np.nan)
-    elif op is divmod:
-        res0 = mask_zero_div_zero(left, right, result[0])
-        res1 = fill_zeros(result[1], left, right, "__divmod__", np.nan)
-        result = (res0, res1)
-    return result
-
+    result : np.ndarray
 
-# FIXME: de-duplicate with dispatch_missing
-def dispatch_fill_zeros(op, left, right, result):
-    """
-    Call fill_zeros with the appropriate fill value depending on the operation,
-    with special logic for divmod and rdivmod.
+    Notes
+    -----
+    For divmod and rdivmod, the `result` parameter and returned `result`
+    is a 2-tuple of ndarray objects.
     """
     if op is divmod:
         result = (
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index f7f6ba8b114e7..fd9db80671360 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -103,18 +103,6 @@ def test_add_extension_scalar(self, other, box, op):
         result = op(arr, other)
         tm.assert_equal(result, expected)
 
-    @pytest.mark.parametrize(
-        "box",
-        [
-            pytest.param(
-                pd.Index,
-                marks=pytest.mark.xfail(reason="Does not mask nulls", raises=TypeError),
-            ),
-            pd.Series,
-            pd.DataFrame,
-        ],
-        ids=lambda x: x.__name__,
-    )
     def test_objarr_add_str(self, box):
         ser = pd.Series(["x", np.nan, "x"])
         expected = pd.Series(["xa", np.nan, "xa"])
@@ -125,18 +113,6 @@ def test_objarr_add_str(self, box):
         result = ser + "a"
         tm.assert_equal(result, expected)
 
-    @pytest.mark.parametrize(
-        "box",
-        [
-            pytest.param(
-                pd.Index,
-                marks=pytest.mark.xfail(reason="Does not mask nulls", raises=TypeError),
-            ),
-            pd.Series,
-            pd.DataFrame,
-        ],
-        ids=lambda x: x.__name__,
-    )
     def test_objarr_radd_str(self, box):
         ser = pd.Series(["x", np.nan, "x"])
         expected = pd.Series(["ax", np.nan, "ax"])
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 605df9971a567..0e9aa07a4c05a 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -144,6 +144,7 @@ def test_set_name_methods(self, indices):
         assert res is None
         assert indices.name == new_name
         assert indices.names == [new_name]
+        # FIXME: dont leave commented-out
         # with pytest.raises(TypeError, match="list-like"):
         #     # should still fail even if it would be the right length
         #     ind.set_names("a")
Getting ever closer to a single implementation.
https://api.github.com/repos/pandas-dev/pandas/pulls/27352
2019-07-12T01:00:06Z
2019-07-12T16:01:54Z
2019-07-12T16:01:54Z
2019-07-12T16:05:36Z
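The heart of the change is `result = op(Series(self), other)`: Index arithmetic is evaluated by the Series implementation and the result re-wrapped in an Index, which is why the `Does not mask nulls` xfails for Index could be dropped from test_object.py. A toy sketch of that dispatch (`index_add` is a hypothetical helper, not pandas API):

```python
import numpy as np
import pandas as pd


def index_add(idx, other):
    """Toy version of the PR's dispatch: run the op through Series,
    which masks missing values, then wrap the result back in an Index."""
    return pd.Index(pd.Series(idx) + other)


idx = pd.Index(["x", np.nan, "x"], dtype=object)
# np.array(idx) + "a" raises TypeError on the NaN element;
# routing through Series propagates the missing value instead.
print(index_add(idx, "a"))  # Index(['xa', nan, 'xa'], dtype='object')
```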
CLN/REF: indexing typing, prune unreachable branches
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 263c4013de281..53cb0cedc208b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2889,11 +2889,11 @@ def _set_value(self, index, col, value, takeable=False): _set_value.__doc__ = set_value.__doc__ - def _ixs(self, i, axis=0): + def _ixs(self, i: int, axis: int = 0): """ Parameters ---------- - i : int, slice, or sequence of integers + i : int axis : int Notes @@ -2902,59 +2902,40 @@ def _ixs(self, i, axis=0): """ # irow if axis == 0: - if isinstance(i, slice): - return self[i] - else: - label = self.index[i] - if isinstance(label, Index): - # a location index by definition - result = self.take(i, axis=axis) - copy = True - else: - new_values = self._data.fast_xs(i) - if is_scalar(new_values): - return new_values - - # if we are a copy, mark as such - copy = ( - isinstance(new_values, np.ndarray) and new_values.base is None - ) - result = self._constructor_sliced( - new_values, - index=self.columns, - name=self.index[i], - dtype=new_values.dtype, - ) - result._set_is_copy(self, copy=copy) - return result + label = self.index[i] + new_values = self._data.fast_xs(i) + if is_scalar(new_values): + return new_values + + # if we are a copy, mark as such + copy = isinstance(new_values, np.ndarray) and new_values.base is None + result = self._constructor_sliced( + new_values, + index=self.columns, + name=self.index[i], + dtype=new_values.dtype, + ) + result._set_is_copy(self, copy=copy) + return result # icol else: label = self.columns[i] - if isinstance(i, slice): - # need to return view - lab_slice = slice(label[0], label[-1]) - return self.loc[:, lab_slice] - else: - if isinstance(label, Index): - return self.take(i, axis=1) - index_len = len(self.index) + # if the values returned are not the same length + # as the index (iow a not found value), iget returns + # a 0-len ndarray. This is effectively catching + # a numpy error (as numpy should really raise) + values = self._data.iget(i) - # if the values returned are not the same length - # as the index (iow a not found value), iget returns - # a 0-len ndarray. 
This is effectively catching - # a numpy error (as numpy should really raise) - values = self._data.iget(i) + if len(self.index) and not len(values): + values = np.array([np.nan] * len(self.index), dtype=object) + result = self._box_col_values(values, label) - if index_len and not len(values): - values = np.array([np.nan] * index_len, dtype=object) - result = self._box_col_values(values, label) + # this is a cached value, mark it so + result._set_as_cached(label, self) - # this is a cached value, mark it so - result._set_as_cached(label, self) - - return result + return result def __getitem__(self, key): key = lib.item_from_zerodim(key) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e19b1f70ce2f7..f28f58b070368 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3495,7 +3495,7 @@ def __delitem__(self, key): deleted = False maybe_shortcut = False - if hasattr(self, "columns") and isinstance(self.columns, MultiIndex): + if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: maybe_shortcut = key not in self.columns._engine except TypeError: @@ -5231,9 +5231,6 @@ def _dir_additions(self): } return super()._dir_additions().union(additions) - # ---------------------------------------------------------------------- - # Getting and setting elements - # ---------------------------------------------------------------------- # Consolidation of internals diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 6040385acbe40..482e9c365420c 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -143,10 +143,7 @@ def __getitem__(self, key): key = com.apply_if_callable(key, self.obj) return self._getitem_axis(key, axis=axis) - def _get_label(self, label, axis=None): - if axis is None: - axis = self.axis or 0 - + def _get_label(self, label, axis: int): if self.ndim == 1: # for perf reasons we want to try _xs first # as its basically direct indexing @@ -158,12 +155,10 @@ def _get_label(self, label, axis=None): return self.obj._xs(label, axis=axis) - def _get_loc(self, key, axis: int): + def _get_loc(self, key: int, axis: int): return self.obj._ixs(key, axis=axis) - def _slice(self, obj, axis=None, kind=None): - if axis is None: - axis = self.axis + def _slice(self, obj, axis: int, kind=None): return self.obj._slice(obj, axis=axis, kind=kind) def _get_setitem_indexer(self, key): @@ -330,19 +325,6 @@ def _setitem_with_indexer(self, indexer, value): val = list(value.values()) if isinstance(value, dict) else value take_split_path = not blk._can_hold_element(val) - if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): - - for i, ax in zip(indexer, self.obj.axes): - - # if we have any multi-indexes that have non-trivial slices - # (not null slices) then we must take the split path, xref - # GH 10360 - if isinstance(ax, MultiIndex) and not ( - is_integer(i) or com.is_null_slice(i) - ): - take_split_path = True - break - if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): @@ -406,71 +388,16 @@ def _setitem_with_indexer(self, indexer, value): indexer, missing = convert_missing_indexer(indexer) if missing: - - # reindex the axis to the new value - # and set inplace - if self.ndim == 1: - index = self.obj.index - new_index = index.insert(len(index), indexer) - - # we have a coerced indexer, e.g. a float - # that matches in an Int64Index, so - # we will not create a duplicate index, rather - # index to that element - # e.g. 
0.0 -> 0 - # GH12246 - if index.is_unique: - new_indexer = index.get_indexer([new_index[-1]]) - if (new_indexer != -1).any(): - return self._setitem_with_indexer(new_indexer, value) - - # this preserves dtype of the value - new_values = Series([value])._values - if len(self.obj._values): - # GH#22717 handle casting compatibility that np.concatenate - # does incorrectly - new_values = _concat_compat([self.obj._values, new_values]) - self.obj._data = self.obj._constructor( - new_values, index=new_index, name=self.obj.name - )._data - self.obj._maybe_update_cacher(clear=True) - return self.obj - - elif self.ndim == 2: - - # no columns and scalar - if not len(self.obj.columns): - raise ValueError( - "cannot set a frame with no defined " "columns" - ) - - # append a Series - if isinstance(value, Series): - - value = value.reindex(index=self.obj.columns, copy=True) - value.name = indexer - - # a list-list - else: - - # must have conforming columns - if is_list_like_indexer(value): - if len(value) != len(self.obj.columns): - raise ValueError( - "cannot set a row with " "mismatched columns" - ) - - value = Series(value, index=self.obj.columns, name=indexer) - - self.obj._data = self.obj.append(value)._data - self.obj._maybe_update_cacher(clear=True) - return self.obj + return self._setitem_with_indexer_missing(indexer, value) # set item_labels = self.obj._get_axis(info_axis) # align and set the values if take_split_path: + # Above we only set take_split_path to True for 2D cases + assert self.ndim == 2 + assert info_axis == 1 if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) @@ -524,11 +451,8 @@ def _setitem_with_indexer(self, indexer, value): # non-mi else: plane_indexer = indexer[:info_axis] + indexer[info_axis + 1 :] - if info_axis > 0: - plane_axis = self.obj.axes[:info_axis][0] - lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis) - else: - lplane_indexer = 0 + plane_axis = self.obj.axes[:info_axis][0] + lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis) def setter(item, v): s = self.obj[item] @@ -578,9 +502,7 @@ def setter(item, v): # hasattr first, to avoid coercing to ndarray without reason. # But we may be relying on the ndarray coercion to check ndim. # Why not just convert to an ndarray earlier on if needed? - elif (hasattr(value, "ndim") and value.ndim == 2) or ( - not hasattr(value, "ndim") and np.array(value).ndim - ) == 2: + elif np.ndim(value) == 2: # note that this coerces the dtype if we are mixed # GH 7551 @@ -656,6 +578,65 @@ def setter(item, v): self.obj._data = self.obj._data.setitem(indexer=indexer, value=value) self.obj._maybe_update_cacher(clear=True) + def _setitem_with_indexer_missing(self, indexer, value): + """ + Insert new row(s) or column(s) into the Series or DataFrame. + """ + from pandas import Series + + # reindex the axis to the new value + # and set inplace + if self.ndim == 1: + index = self.obj.index + new_index = index.insert(len(index), indexer) + + # we have a coerced indexer, e.g. a float + # that matches in an Int64Index, so + # we will not create a duplicate index, rather + # index to that element + # e.g. 
0.0 -> 0 + # GH#12246 + if index.is_unique: + new_indexer = index.get_indexer([new_index[-1]]) + if (new_indexer != -1).any(): + return self._setitem_with_indexer(new_indexer, value) + + # this preserves dtype of the value + new_values = Series([value])._values + if len(self.obj._values): + # GH#22717 handle casting compatibility that np.concatenate + # does incorrectly + new_values = _concat_compat([self.obj._values, new_values]) + self.obj._data = self.obj._constructor( + new_values, index=new_index, name=self.obj.name + )._data + self.obj._maybe_update_cacher(clear=True) + return self.obj + + elif self.ndim == 2: + + if not len(self.obj.columns): + # no columns and scalar + raise ValueError("cannot set a frame with no defined columns") + + if isinstance(value, ABCSeries): + # append a Series + value = value.reindex(index=self.obj.columns, copy=True) + value.name = indexer + + else: + # a list-list + if is_list_like_indexer(value): + # must have conforming columns + if len(value) != len(self.obj.columns): + raise ValueError("cannot set a row with mismatched columns") + + value = Series(value, index=self.obj.columns, name=indexer) + + self.obj._data = self.obj.append(value)._data + self.obj._maybe_update_cacher(clear=True) + return self.obj + def _align_series(self, indexer, ser, multiindex_indexer=False): """ Parameters @@ -820,9 +801,6 @@ def _getitem_tuple(self, tup): # no shortcut needed retval = self.obj for i, key in enumerate(tup): - if i >= self.obj.ndim: - raise IndexingError("Too many indexers") - if com.is_null_slice(key): continue @@ -882,10 +860,10 @@ def _convert_for_reindex(self, key, axis: int): def _handle_lowerdim_multi_index_axis0(self, tup): # we have an axis0 multi-index, handle or raise - + axis = self.axis or 0 try: # fast path for series or for tup devoid of slices - return self._get_label(tup, axis=self.axis) + return self._get_label(tup, axis=axis) except TypeError: # slices are unhashable pass @@ -983,7 +961,8 @@ def _getitem_nested_tuple(self, tup): # this is a series with a multi-index specified a tuple of # selectors - return self._getitem_axis(tup, axis=self.axis) + axis = self.axis or 0 + return self._getitem_axis(tup, axis=axis) # handle the multi-axis by taking sections and reducing # this is iterative @@ -1010,11 +989,7 @@ def _getitem_nested_tuple(self, tup): return obj - def _getitem_axis(self, key, axis=None): - - if axis is None: - axis = self.axis or 0 - + def _getitem_axis(self, key, axis: int): if is_iterator(key): key = list(key) self._validate_key(key, axis) @@ -1439,7 +1414,7 @@ def _is_scalar_access(self, key): def _getitem_scalar(self, key): raise NotImplementedError() - def _getitem_axis(self, key, axis=None): + def _getitem_axis(self, key, axis: int): raise NotImplementedError() def _getbool_axis(self, key, axis: int): @@ -1786,10 +1761,7 @@ def _get_partial_string_timestamp_match_key(self, key, labels): return key - def _getitem_axis(self, key, axis=None): - if axis is None: - axis = self.axis or 0 - + def _getitem_axis(self, key, axis: int): key = item_from_zerodim(key) if is_iterator(key): key = list(key) @@ -2106,9 +2078,6 @@ def _getitem_tuple(self, tup): retval = self.obj axis = 0 for i, key in enumerate(tup): - if i >= self.obj.ndim: - raise IndexingError("Too many indexers") - if com.is_null_slice(key): axis += 1 continue @@ -2143,10 +2112,7 @@ def _get_list_axis(self, key, axis: int): # re-raise with different error message raise IndexError("positional indexers are out-of-bounds") - def _getitem_axis(self, key, axis=None): - if 
axis is None: - axis = self.axis or 0 - + def _getitem_axis(self, key, axis: int): if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) diff --git a/pandas/core/series.py b/pandas/core/series.py index 73a71a2a41f4c..6a58b1ea6f82d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1028,38 +1028,25 @@ def axes(self): """ return [self.index] - def _ixs(self, i, axis=0): + def _ixs(self, i: int, axis: int = 0): """ Return the i-th value or values in the Series by location. Parameters ---------- - i : int, slice, or sequence of integers + i : int Returns ------- scalar (int) or Series (slice, sequence) """ - try: - # dispatch to the values if we need - values = self._values - if isinstance(values, np.ndarray): - return libindex.get_value_at(values, i) - else: - return values[i] - except IndexError: - raise - except Exception: - if isinstance(i, slice): - indexer = self.index._convert_slice_indexer(i, kind="iloc") - return self._get_values(indexer) - else: - label = self.index[i] - if isinstance(label, Index): - return self.take(i, axis=axis, convert=True) - else: - return libindex.get_value_at(self, i) + # dispatch to the values if we need + values = self._values + if isinstance(values, np.ndarray): + return libindex.get_value_at(values, i) + else: + return values[i] @property def _is_mixed_type(self):
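For illustration, a minimal sketch of the user-facing enlargement-setitem behavior that the extracted `_setitem_with_indexer_missing` helper above handles; the example data is assumed, not taken from the diff:

```python
import pandas as pd

# Setting a missing label enlarges the object; this is the case the new
# _setitem_with_indexer_missing helper isolates from _setitem_with_indexer.
s = pd.Series([1, 2, 3])
s.loc[3] = 4          # appends a new row; index becomes [0, 1, 2, 3]

df = pd.DataFrame({"a": [1], "b": [2]})
df.loc[1] = [10, 20]  # a list must conform to the columns, else ValueError
df.loc[2] = pd.Series({"a": 30, "b": 40})  # a Series is reindexed to the columns
```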
https://api.github.com/repos/pandas-dev/pandas/pulls/27351
2019-07-11T23:49:43Z
2019-07-12T16:09:00Z
2019-07-12T16:09:00Z
2019-07-12T16:10:16Z
CLN: Collapse private ._take implementation into the public take method
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 55a9eb6a0810a..263c4013de281 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2937,7 +2937,7 @@ def _ixs(self, i, axis=0): return self.loc[:, lab_slice] else: if isinstance(label, Index): - return self._take(i, axis=1) + return self.take(i, axis=1) index_len = len(self.index) @@ -2999,7 +2999,7 @@ def __getitem__(self, key): if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] - data = self._take(indexer, axis=1) + data = self.take(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? @@ -3032,7 +3032,7 @@ def _getitem_bool_array(self, key): # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] - return self._take(indexer, axis=0) + return self.take(indexer, axis=0) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0e2253aed1c88..e19b1f70ce2f7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3294,7 +3294,7 @@ def _iget_item_cache(self, item): if ax.is_unique: lower = self._get_item_cache(ax[item]) else: - lower = self._take(item, axis=self._info_axis_number) + lower = self.take(item, axis=self._info_axis_number) return lower def _box_item_values(self, key, values): @@ -3522,52 +3522,6 @@ def __delitem__(self, key): except KeyError: pass - def _take(self, indices, axis=0, is_copy=True): - """ - Return the elements in the given *positional* indices along an axis. - - This means that we are not indexing according to actual values in - the index attribute of the object. We are indexing according to the - actual position of the element in the object. - - This is the internal version of ``.take()`` and will contain a wider - selection of parameters useful for internal use but not as suitable - for public usage. - - Parameters - ---------- - indices : array-like - An array of ints indicating which positions to take. - axis : int, default 0 - The axis on which to select elements. "0" means that we are - selecting rows, "1" means that we are selecting columns, etc. - is_copy : bool, default True - Whether to return a copy of the original object or not. - - Returns - ------- - taken : same type as caller - An array-like containing the elements taken from the object. - - See Also - -------- - numpy.ndarray.take - numpy.take - """ - self._consolidate_inplace() - - new_data = self._data.take( - indices, axis=self._get_block_manager_axis(axis), verify=True - ) - result = self._constructor(new_data).__finalize__(self) - - # Maybe set copy if we didn't actually change the index. - if is_copy: - if not result._get_axis(axis).equals(self._get_axis(axis)): - result._set_is_copy(self) - - return result - def take(self, indices, axis=0, is_copy=True, **kwargs): """ Return the elements in the given *positional* indices along an axis. @@ -3644,7 +3598,20 @@ class max_speed 3 lion mammal 80.5 """ nv.validate_take(tuple(), kwargs) - return self._take(indices, axis=axis, is_copy=is_copy) + + self._consolidate_inplace() + + new_data = self._data.take( + indices, axis=self._get_block_manager_axis(axis), verify=True + ) + result = self._constructor(new_data).__finalize__(self) + + # Maybe set copy if we didn't actually change the index. 
+ if is_copy: + if not result._get_axis(axis).equals(self._get_axis(axis)): + result._set_is_copy(self) + + return result def xs(self, key, axis=0, level=None, drop_level=True): """ @@ -3773,9 +3740,9 @@ class animal locomotion if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: inds, = loc.nonzero() - return self._take(inds, axis=axis) + return self.take(inds, axis=axis) else: - return self._take(loc, axis=axis) + return self.take(loc, axis=axis) if not is_scalar(loc): new_index = self.index[loc] @@ -8091,7 +8058,7 @@ def at_time(self, time, asof=False, axis=None): except AttributeError: raise TypeError("Index must be DatetimeIndex") - return self._take(indexer, axis=axis) + return self.take(indexer, axis=axis) def between_time( self, start_time, end_time, include_start=True, include_end=True, axis=None @@ -8168,7 +8135,7 @@ def between_time( except AttributeError: raise TypeError("Index must be DatetimeIndex") - return self._take(indexer, axis=axis) + return self.take(indexer, axis=axis) def resample( self, diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index aa71fd68086fb..9aba9723e0546 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -679,7 +679,7 @@ def get_group(self, name, obj=None): if not len(inds): raise KeyError(name) - return obj._take(inds, axis=self.axis) + return obj.take(inds, axis=self.axis) def __iter__(self): """ diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 3cf358261e685..a127d092b7b1a 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -194,7 +194,7 @@ def _set_grouper(self, obj, sort=False): # use stable sort to support first, last, nth indexer = self.indexer = ax.argsort(kind="mergesort") ax = ax.take(indexer) - obj = obj._take(indexer, axis=self.axis, is_copy=False) + obj = obj.take(indexer, axis=self.axis, is_copy=False) self.obj = obj self.grouper = ax diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 33341a489866b..e341a66bb7459 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -675,7 +675,7 @@ def _aggregate_series_fast(self, obj, func): # avoids object / Series creation overhead dummy = obj._get_values(slice(None, 0)) indexer = get_group_index_sorter(group_index, ngroups) - obj = obj._take(indexer) + obj = obj.take(indexer) group_index = algorithms.take_nd(group_index, indexer, allow_fill=False) grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups, dummy) result, counts = grouper.get_result() @@ -915,7 +915,7 @@ def __iter__(self): yield i, self._chop(sdata, slice(start, end)) def _get_sorted_data(self): - return self.data._take(self.sort_idx, axis=self.axis) + return self.data.take(self.sort_idx, axis=self.axis) def _chop(self, sdata, slice_obj): return sdata.iloc[slice_obj] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 01f338a021cec..6040385acbe40 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1137,7 +1137,7 @@ def _getitem_iterable(self, key, axis: int): # A boolean indexer key = check_bool_indexer(labels, key) inds, = key.nonzero() - return self.obj._take(inds, axis=axis) + return self.obj.take(inds, axis=axis) else: # A collection of keys keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False) @@ -1448,7 +1448,7 @@ def _getbool_axis(self, key, axis: int): key = check_bool_indexer(labels, key) inds, = key.nonzero() try: - return self.obj._take(inds, axis=axis) + return self.obj.take(inds, 
axis=axis) except Exception as detail: raise self._exception(detail) @@ -1469,7 +1469,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int): else: # DatetimeIndex overrides Index.slice_indexer and may # return a DatetimeIndex instead of a slice object. - return self.obj._take(indexer, axis=axis) + return self.obj.take(indexer, axis=axis) class _LocIndexer(_LocationIndexer): @@ -2138,7 +2138,7 @@ def _get_list_axis(self, key, axis: int): Series object """ try: - return self.obj._take(key, axis=axis) + return self.obj.take(key, axis=axis) except IndexError: # re-raise with different error message raise IndexError("positional indexers are out-of-bounds") diff --git a/pandas/core/series.py b/pandas/core/series.py index acb0826953508..73a71a2a41f4c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4371,8 +4371,9 @@ def memory_usage(self, index=True, deep=False): v += self.index.memory_usage(deep=deep) return v - @Appender(generic.NDFrame._take.__doc__) - def _take(self, indices, axis=0, is_copy=False): + @Appender(generic.NDFrame.take.__doc__) + def take(self, indices, axis=0, is_copy=False, **kwargs): + nv.validate_take(tuple(), kwargs) indices = ensure_platform_int(indices) new_index = self.index.take(indices)
- [x] closes #27174 - [ ] tests added / passed - [X] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Getting rid of the `_take` private method, which no longer seems to be needed.
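A short sketch of the public `take` API that now carries the logic directly (the frame contents here are illustrative, not from the PR):

```python
import pandas as pd

df = pd.DataFrame({"name": ["falcon", "parrot", "lion"],
                   "class": ["bird", "bird", "mammal"]})

df.take([0, 2])       # positional row selection -> falcon, lion
df.take([1], axis=1)  # positional column selection -> the "class" column
df.take([-1])         # negative indices count from the end -> lion
```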
https://api.github.com/repos/pandas-dev/pandas/pulls/27349
2019-07-11T21:54:48Z
2019-07-12T14:26:55Z
2019-07-12T14:26:54Z
2019-07-12T14:32:33Z
CLN: Split test_window.py further
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py new file mode 100644 index 0000000000000..1dfc0f34b2b8d --- /dev/null +++ b/pandas/tests/window/common.py @@ -0,0 +1,23 @@ +from datetime import datetime + +import numpy as np +from numpy.random import randn + +from pandas import DataFrame, Series, bdate_range + +N, K = 100, 10 + + +class Base: + + _nan_locs = np.arange(20, 40) + _inf_locs = np.array([]) + + def _create_data(self): + arr = randn(N) + arr[self._nan_locs] = np.NaN + + self.arr = arr + self.rng = bdate_range(datetime(2009, 1, 1), periods=N) + self.series = Series(arr.copy(), index=self.rng) + self.frame = DataFrame(randn(N, K), index=self.rng, columns=np.arange(K)) diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py new file mode 100644 index 0000000000000..11527efa4c39f --- /dev/null +++ b/pandas/tests/window/test_api.py @@ -0,0 +1,367 @@ +from collections import OrderedDict +import warnings +from warnings import catch_warnings + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import DataFrame, Index, Series, Timestamp, concat +from pandas.core.base import SpecificationError +from pandas.tests.window.common import Base +import pandas.util.testing as tm + + +class TestApi(Base): + def setup_method(self, method): + self._create_data() + + def test_getitem(self): + + r = self.frame.rolling(window=5) + tm.assert_index_equal(r._selected_obj.columns, self.frame.columns) + + r = self.frame.rolling(window=5)[1] + assert r._selected_obj.name == self.frame.columns[1] + + # technically this is allowed + r = self.frame.rolling(window=5)[1, 3] + tm.assert_index_equal(r._selected_obj.columns, self.frame.columns[[1, 3]]) + + r = self.frame.rolling(window=5)[[1, 3]] + tm.assert_index_equal(r._selected_obj.columns, self.frame.columns[[1, 3]]) + + def test_select_bad_cols(self): + df = DataFrame([[1, 2]], columns=["A", "B"]) + g = df.rolling(window=5) + with pytest.raises(KeyError, match="Columns not found: 'C'"): + g[["C"]] + with pytest.raises(KeyError, match="^[^A]+$"): + # A should not be referenced as a bad column... + # will have to rethink regex if you change message! 
+ g[["A", "C"]] + + def test_attribute_access(self): + + df = DataFrame([[1, 2]], columns=["A", "B"]) + r = df.rolling(window=5) + tm.assert_series_equal(r.A.sum(), r["A"].sum()) + msg = "'Rolling' object has no attribute 'F'" + with pytest.raises(AttributeError, match=msg): + r.F + + def tests_skip_nuisance(self): + + df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"}) + r = df.rolling(window=3) + result = r[["A", "B"]].sum() + expected = DataFrame( + {"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]}, + columns=list("AB"), + ) + tm.assert_frame_equal(result, expected) + + def test_skip_sum_object_raises(self): + df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"}) + r = df.rolling(window=3) + result = r.sum() + expected = DataFrame( + {"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]}, + columns=list("AB"), + ) + tm.assert_frame_equal(result, expected) + + def test_agg(self): + df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) + + r = df.rolling(window=3) + a_mean = r["A"].mean() + a_std = r["A"].std() + a_sum = r["A"].sum() + b_mean = r["B"].mean() + b_std = r["B"].std() + b_sum = r["B"].sum() + + result = r.aggregate([np.mean, np.std]) + expected = concat([a_mean, a_std, b_mean, b_std], axis=1) + expected.columns = pd.MultiIndex.from_product([["A", "B"], ["mean", "std"]]) + tm.assert_frame_equal(result, expected) + + result = r.aggregate({"A": np.mean, "B": np.std}) + + expected = concat([a_mean, b_std], axis=1) + tm.assert_frame_equal(result, expected, check_like=True) + + result = r.aggregate({"A": ["mean", "std"]}) + expected = concat([a_mean, a_std], axis=1) + expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "std")]) + tm.assert_frame_equal(result, expected) + + result = r["A"].aggregate(["mean", "sum"]) + expected = concat([a_mean, a_sum], axis=1) + expected.columns = ["mean", "sum"] + tm.assert_frame_equal(result, expected) + + with catch_warnings(record=True): + # using a dict with renaming + warnings.simplefilter("ignore", FutureWarning) + result = r.aggregate({"A": {"mean": "mean", "sum": "sum"}}) + expected = concat([a_mean, a_sum], axis=1) + expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "sum")]) + tm.assert_frame_equal(result, expected, check_like=True) + + with catch_warnings(record=True): + warnings.simplefilter("ignore", FutureWarning) + result = r.aggregate( + { + "A": {"mean": "mean", "sum": "sum"}, + "B": {"mean2": "mean", "sum2": "sum"}, + } + ) + expected = concat([a_mean, a_sum, b_mean, b_sum], axis=1) + exp_cols = [("A", "mean"), ("A", "sum"), ("B", "mean2"), ("B", "sum2")] + expected.columns = pd.MultiIndex.from_tuples(exp_cols) + tm.assert_frame_equal(result, expected, check_like=True) + + result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]}) + expected = concat([a_mean, a_std, b_mean, b_std], axis=1) + + exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")] + expected.columns = pd.MultiIndex.from_tuples(exp_cols) + tm.assert_frame_equal(result, expected, check_like=True) + + def test_agg_apply(self, raw): + + # passed lambda + df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) + + r = df.rolling(window=3) + a_sum = r["A"].sum() + + result = r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)}) + rcustom = r["B"].apply(lambda x: np.std(x, ddof=1), raw=raw) + expected = concat([a_sum, rcustom], axis=1) + tm.assert_frame_equal(result, expected, check_like=True) + + def test_agg_consistency(self): + + df = DataFrame({"A": range(5), "B": 
range(0, 10, 2)}) + r = df.rolling(window=3) + + result = r.agg([np.sum, np.mean]).columns + expected = pd.MultiIndex.from_product([list("AB"), ["sum", "mean"]]) + tm.assert_index_equal(result, expected) + + result = r["A"].agg([np.sum, np.mean]).columns + expected = Index(["sum", "mean"]) + tm.assert_index_equal(result, expected) + + result = r.agg({"A": [np.sum, np.mean]}).columns + expected = pd.MultiIndex.from_tuples([("A", "sum"), ("A", "mean")]) + tm.assert_index_equal(result, expected) + + def test_agg_nested_dicts(self): + + # API change for disallowing these types of nested dicts + df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) + r = df.rolling(window=3) + + msg = r"cannot perform renaming for (r1|r2) with a nested dictionary" + with pytest.raises(SpecificationError, match=msg): + r.aggregate({"r1": {"A": ["mean", "sum"]}, "r2": {"B": ["mean", "sum"]}}) + + expected = concat( + [r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1 + ) + expected.columns = pd.MultiIndex.from_tuples( + [("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")] + ) + with catch_warnings(record=True): + warnings.simplefilter("ignore", FutureWarning) + result = r[["A", "B"]].agg( + {"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}} + ) + tm.assert_frame_equal(result, expected, check_like=True) + + with catch_warnings(record=True): + warnings.simplefilter("ignore", FutureWarning) + result = r.agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}}) + expected.columns = pd.MultiIndex.from_tuples( + [ + ("A", "ra", "mean"), + ("A", "ra", "std"), + ("B", "rb", "mean"), + ("B", "rb", "std"), + ] + ) + tm.assert_frame_equal(result, expected, check_like=True) + + def test_count_nonnumeric_types(self): + # GH12541 + cols = [ + "int", + "float", + "string", + "datetime", + "timedelta", + "periods", + "fl_inf", + "fl_nan", + "str_nan", + "dt_nat", + "periods_nat", + ] + + df = DataFrame( + { + "int": [1, 2, 3], + "float": [4.0, 5.0, 6.0], + "string": list("abc"), + "datetime": pd.date_range("20170101", periods=3), + "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"), + "periods": [ + pd.Period("2012-01"), + pd.Period("2012-02"), + pd.Period("2012-03"), + ], + "fl_inf": [1.0, 2.0, np.Inf], + "fl_nan": [1.0, 2.0, np.NaN], + "str_nan": ["aa", "bb", np.NaN], + "dt_nat": [ + Timestamp("20170101"), + Timestamp("20170203"), + Timestamp(None), + ], + "periods_nat": [ + pd.Period("2012-01"), + pd.Period("2012-02"), + pd.Period(None), + ], + }, + columns=cols, + ) + + expected = DataFrame( + { + "int": [1.0, 2.0, 2.0], + "float": [1.0, 2.0, 2.0], + "string": [1.0, 2.0, 2.0], + "datetime": [1.0, 2.0, 2.0], + "timedelta": [1.0, 2.0, 2.0], + "periods": [1.0, 2.0, 2.0], + "fl_inf": [1.0, 2.0, 2.0], + "fl_nan": [1.0, 2.0, 1.0], + "str_nan": [1.0, 2.0, 1.0], + "dt_nat": [1.0, 2.0, 1.0], + "periods_nat": [1.0, 2.0, 1.0], + }, + columns=cols, + ) + + result = df.rolling(window=2).count() + tm.assert_frame_equal(result, expected) + + result = df.rolling(1).count() + expected = df.notna().astype(float) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no_scipy + @pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") + def test_window_with_args(self): + # make sure that we are aggregating window functions correctly with arg + r = Series(np.random.randn(100)).rolling( + window=10, min_periods=1, win_type="gaussian" + ) + expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) + expected.columns = ["<lambda>", "<lambda>"] + result = r.aggregate([lambda x: 
x.mean(std=10), lambda x: x.mean(std=0.01)]) + tm.assert_frame_equal(result, expected) + + def a(x): + return x.mean(std=10) + + def b(x): + return x.mean(std=0.01) + + expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) + expected.columns = ["a", "b"] + result = r.aggregate([a, b]) + tm.assert_frame_equal(result, expected) + + def test_preserve_metadata(self): + # GH 10565 + s = Series(np.arange(100), name="foo") + + s2 = s.rolling(30).sum() + s3 = s.rolling(20).sum() + assert s2.name == "foo" + assert s3.name == "foo" + + @pytest.mark.parametrize( + "func,window_size,expected_vals", + [ + ( + "rolling", + 2, + [ + [np.nan, np.nan, np.nan, np.nan], + [15.0, 20.0, 25.0, 20.0], + [25.0, 30.0, 35.0, 30.0], + [np.nan, np.nan, np.nan, np.nan], + [20.0, 30.0, 35.0, 30.0], + [35.0, 40.0, 60.0, 40.0], + [60.0, 80.0, 85.0, 80], + ], + ), + ( + "expanding", + None, + [ + [10.0, 10.0, 20.0, 20.0], + [15.0, 20.0, 25.0, 20.0], + [20.0, 30.0, 30.0, 20.0], + [10.0, 10.0, 30.0, 30.0], + [20.0, 30.0, 35.0, 30.0], + [26.666667, 40.0, 50.0, 30.0], + [40.0, 80.0, 60.0, 30.0], + ], + ), + ], + ) + def test_multiple_agg_funcs(self, func, window_size, expected_vals): + # GH 15072 + df = pd.DataFrame( + [ + ["A", 10, 20], + ["A", 20, 30], + ["A", 30, 40], + ["B", 10, 30], + ["B", 30, 40], + ["B", 40, 80], + ["B", 80, 90], + ], + columns=["stock", "low", "high"], + ) + + f = getattr(df.groupby("stock"), func) + if window_size: + window = f(window_size) + else: + window = f() + + index = pd.MultiIndex.from_tuples( + [("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)], + names=["stock", None], + ) + columns = pd.MultiIndex.from_tuples( + [("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")] + ) + expected = pd.DataFrame(expected_vals, index=index, columns=columns) + + result = window.agg( + OrderedDict((("low", ["mean", "max"]), ("high", ["mean", "min"]))) + ) + + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py new file mode 100644 index 0000000000000..a05b567adad7a --- /dev/null +++ b/pandas/tests/window/test_ewm.py @@ -0,0 +1,70 @@ +import numpy as np +import pytest + +from pandas.errors import UnsupportedFunctionCall + +from pandas import DataFrame, Series +import pandas.core.window as rwindow +from pandas.tests.window.common import Base + + +class TestEWM(Base): + def setup_method(self, method): + self._create_data() + + def test_doc_string(self): + + df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) + df + df.ewm(com=0.5).mean() + + @pytest.mark.parametrize("which", ["series", "frame"]) + def test_constructor(self, which): + o = getattr(self, which) + c = o.ewm + + # valid + c(com=0.5) + c(span=1.5) + c(alpha=0.5) + c(halflife=0.75) + c(com=0.5, span=None) + c(alpha=0.5, com=None) + c(halflife=0.75, alpha=None) + + # not valid: mutually exclusive + with pytest.raises(ValueError): + c(com=0.5, alpha=0.5) + with pytest.raises(ValueError): + c(span=1.5, halflife=0.75) + with pytest.raises(ValueError): + c(alpha=0.5, span=1.5) + + # not valid: com < 0 + with pytest.raises(ValueError): + c(com=-0.5) + + # not valid: span < 1 + with pytest.raises(ValueError): + c(span=0.5) + + # not valid: halflife <= 0 + with pytest.raises(ValueError): + c(halflife=0) + + # not valid: alpha <= 0 or alpha > 1 + for alpha in (-0.5, 1.5): + with pytest.raises(ValueError): + c(alpha=alpha) + + @pytest.mark.parametrize("method", ["std", "mean", "var"]) + def test_numpy_compat(self, method): + # see gh-12811 + e = 
rwindow.EWM(Series([2, 4, 6]), alpha=0.5) + + msg = "numpy operations are not valid with window objects" + + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(e, method)(1, 2, 3) + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(e, method)(dtype=np.float64) diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py new file mode 100644 index 0000000000000..1e92c981964c5 --- /dev/null +++ b/pandas/tests/window/test_expanding.py @@ -0,0 +1,115 @@ +import numpy as np +import pytest + +from pandas.errors import UnsupportedFunctionCall + +import pandas as pd +from pandas import DataFrame, Series +import pandas.core.window as rwindow +from pandas.tests.window.common import Base +import pandas.util.testing as tm + + +class TestExpanding(Base): + def setup_method(self, method): + self._create_data() + + def test_doc_string(self): + + df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) + df + df.expanding(2).sum() + + @pytest.mark.parametrize("which", ["series", "frame"]) + def test_constructor(self, which): + # GH 12669 + + o = getattr(self, which) + c = o.expanding + + # valid + c(min_periods=1) + c(min_periods=1, center=True) + c(min_periods=1, center=False) + + # not valid + for w in [2.0, "foo", np.array([2])]: + with pytest.raises(ValueError): + c(min_periods=w) + with pytest.raises(ValueError): + c(min_periods=1, center=w) + + @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) + def test_numpy_compat(self, method): + # see gh-12811 + e = rwindow.Expanding(Series([2, 4, 6]), window=2) + + msg = "numpy operations are not valid with window objects" + + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(e, method)(1, 2, 3) + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(e, method)(dtype=np.float64) + + @pytest.mark.parametrize( + "expander", + [ + 1, + pytest.param( + "ls", + marks=pytest.mark.xfail( + reason="GH#16425 expanding with offset not supported" + ), + ), + ], + ) + def test_empty_df_expanding(self, expander): + # GH 15819 Verifies that datetime and integer expanding windows can be + # applied to empty DataFrames + + expected = DataFrame() + result = DataFrame().expanding(expander).sum() + tm.assert_frame_equal(result, expected) + + # Verifies that datetime and integer expanding windows can be applied + # to empty DataFrames with datetime index + expected = DataFrame(index=pd.DatetimeIndex([])) + result = DataFrame(index=pd.DatetimeIndex([])).expanding(expander).sum() + tm.assert_frame_equal(result, expected) + + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.expanding(min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.expanding(min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("klass", [pd.Series, pd.DataFrame]) + def test_iter_raises(self, klass): + # https://github.com/pandas-dev/pandas/issues/11704 + # Iteration over a Window + obj = klass([1, 2, 3, 4]) + with pytest.raises(NotImplementedError): + iter(obj.expanding(2)) + + def test_expanding_axis(self, axis_frame): + # see gh-23372. 
+ df = DataFrame(np.ones((10, 20))) + axis = df._get_axis_number(axis_frame) + + if axis == 0: + expected = DataFrame( + {i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)} + ) + else: + # axis == 1 + expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10) + + result = df.expanding(3, axis=axis_frame).sum() + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py new file mode 100644 index 0000000000000..b726bd3e3c8a7 --- /dev/null +++ b/pandas/tests/window/test_grouper.py @@ -0,0 +1,176 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame, Series +import pandas.util.testing as tm + + +class TestGrouperGrouping: + def setup_method(self, method): + self.series = Series(np.arange(10)) + self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) + + def test_mutated(self): + + msg = r"group\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + self.frame.groupby("A", foo=1) + + g = self.frame.groupby("A") + assert not g.mutated + g = self.frame.groupby("A", mutated=True) + assert g.mutated + + def test_getitem(self): + g = self.frame.groupby("A") + g_mutated = self.frame.groupby("A", mutated=True) + + expected = g_mutated.B.apply(lambda x: x.rolling(2).mean()) + + result = g.rolling(2).mean().B + tm.assert_series_equal(result, expected) + + result = g.rolling(2).B.mean() + tm.assert_series_equal(result, expected) + + result = g.B.rolling(2).mean() + tm.assert_series_equal(result, expected) + + result = self.frame.B.groupby(self.frame.A).rolling(2).mean() + tm.assert_series_equal(result, expected) + + def test_getitem_multiple(self): + + # GH 13174 + g = self.frame.groupby("A") + r = g.rolling(2) + g_mutated = self.frame.groupby("A", mutated=True) + expected = g_mutated.B.apply(lambda x: x.rolling(2).count()) + + result = r.B.count() + tm.assert_series_equal(result, expected) + + result = r.B.count() + tm.assert_series_equal(result, expected) + + def test_rolling(self): + g = self.frame.groupby("A") + r = g.rolling(window=4) + + for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]: + + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.rolling(4), f)()) + tm.assert_frame_equal(result, expected) + + for f in ["std", "var"]: + result = getattr(r, f)(ddof=1) + expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1)) + tm.assert_frame_equal(result, expected) + + result = r.quantile(0.5) + expected = g.apply(lambda x: x.rolling(4).quantile(0.5)) + tm.assert_frame_equal(result, expected) + + def test_rolling_corr_cov(self): + g = self.frame.groupby("A") + r = g.rolling(window=4) + + for f in ["corr", "cov"]: + result = getattr(r, f)(self.frame) + + def func(x): + return getattr(x.rolling(4), f)(self.frame) + + expected = g.apply(func) + tm.assert_frame_equal(result, expected) + + result = getattr(r.B, f)(pairwise=True) + + def func(x): + return getattr(x.B.rolling(4), f)(pairwise=True) + + expected = g.apply(func) + tm.assert_series_equal(result, expected) + + def test_rolling_apply(self, raw): + g = self.frame.groupby("A") + r = g.rolling(window=4) + + # reduction + result = r.apply(lambda x: x.sum(), raw=raw) + expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) + tm.assert_frame_equal(result, expected) + + def test_rolling_apply_mutability(self): + # GH 14013 + df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) + g = 
df.groupby("A") + + mi = pd.MultiIndex.from_tuples( + [("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)] + ) + + mi.names = ["A", None] + # Grouped column should not be a part of the output + expected = pd.DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi) + + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + + # Call an arbitrary function on the groupby + g.sum() + + # Make sure nothing has been mutated + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + + def test_expanding(self): + g = self.frame.groupby("A") + r = g.expanding() + + for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]: + + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.expanding(), f)()) + tm.assert_frame_equal(result, expected) + + for f in ["std", "var"]: + result = getattr(r, f)(ddof=0) + expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) + tm.assert_frame_equal(result, expected) + + result = r.quantile(0.5) + expected = g.apply(lambda x: x.expanding().quantile(0.5)) + tm.assert_frame_equal(result, expected) + + def test_expanding_corr_cov(self): + g = self.frame.groupby("A") + r = g.expanding() + + for f in ["corr", "cov"]: + result = getattr(r, f)(self.frame) + + def func(x): + return getattr(x.expanding(), f)(self.frame) + + expected = g.apply(func) + tm.assert_frame_equal(result, expected) + + result = getattr(r.B, f)(pairwise=True) + + def func(x): + return getattr(x.B.expanding(), f)(pairwise=True) + + expected = g.apply(func) + tm.assert_series_equal(result, expected) + + def test_expanding_apply(self, raw): + g = self.frame.groupby("A") + r = g.expanding() + + # reduction + result = r.apply(lambda x: x.sum(), raw=raw) + expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py new file mode 100644 index 0000000000000..d860859958254 --- /dev/null +++ b/pandas/tests/window/test_moments.py @@ -0,0 +1,2562 @@ +import copy +from datetime import datetime +import warnings + +import numpy as np +from numpy.random import randn +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import DataFrame, Index, Series, concat, isna, notna +import pandas.core.window as rwindow +from pandas.tests.window.common import Base +import pandas.util.testing as tm + +import pandas.tseries.offsets as offsets + + +@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") +class TestMoments(Base): + def setup_method(self, method): + self._create_data() + + def test_centered_axis_validation(self): + + # ok + Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean() + + # bad axis + with pytest.raises(ValueError): + Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean() + + # ok ok + DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean() + DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean() + + # bad axis + with pytest.raises(ValueError): + (DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean()) + + def test_rolling_sum(self, raw): + self._check_moment_func( + np.nansum, name="sum", zero_min_periods_equal=False, raw=raw + ) + + def test_rolling_count(self, raw): + counter = lambda x: np.isfinite(x).astype(float).sum() + self._check_moment_func( + counter, name="count", has_min_periods=False, fill_value=0, raw=raw + ) + + def 
test_rolling_mean(self, raw): + self._check_moment_func(np.mean, name="mean", raw=raw) + + @td.skip_if_no_scipy + def test_cmov_mean(self): + # GH 8238 + vals = np.array( + [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] + ) + result = Series(vals).rolling(5, center=True).mean() + expected = Series( + [ + np.nan, + np.nan, + 9.962, + 11.27, + 11.564, + 12.516, + 12.818, + 12.952, + np.nan, + np.nan, + ] + ) + tm.assert_series_equal(expected, result) + + @td.skip_if_no_scipy + def test_cmov_window(self): + # GH 8238 + vals = np.array( + [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] + ) + result = Series(vals).rolling(5, win_type="boxcar", center=True).mean() + expected = Series( + [ + np.nan, + np.nan, + 9.962, + 11.27, + 11.564, + 12.516, + 12.818, + 12.952, + np.nan, + np.nan, + ] + ) + tm.assert_series_equal(expected, result) + + @td.skip_if_no_scipy + def test_cmov_window_corner(self): + # GH 8238 + # all nan + vals = pd.Series([np.nan] * 10) + result = vals.rolling(5, center=True, win_type="boxcar").mean() + assert np.isnan(result).all() + + # empty + vals = pd.Series([]) + result = vals.rolling(5, center=True, win_type="boxcar").mean() + assert len(result) == 0 + + # shorter than window + vals = pd.Series(np.random.randn(5)) + result = vals.rolling(10, win_type="boxcar").mean() + assert np.isnan(result).all() + assert len(result) == 5 + + @td.skip_if_no_scipy + def test_cmov_window_frame(self): + # Gh 8238 + vals = np.array( + [ + [12.18, 3.64], + [10.18, 9.16], + [13.24, 14.61], + [4.51, 8.11], + [6.15, 11.44], + [9.14, 6.21], + [11.31, 10.67], + [2.94, 6.51], + [9.42, 8.39], + [12.44, 7.34], + ] + ) + + xp = np.array( + [ + [np.nan, np.nan], + [np.nan, np.nan], + [9.252, 9.392], + [8.644, 9.906], + [8.87, 10.208], + [6.81, 8.588], + [7.792, 8.644], + [9.05, 7.824], + [np.nan, np.nan], + [np.nan, np.nan], + ] + ) + + # DataFrame + rs = DataFrame(vals).rolling(5, win_type="boxcar", center=True).mean() + tm.assert_frame_equal(DataFrame(xp), rs) + + # invalid method + with pytest.raises(AttributeError): + (DataFrame(vals).rolling(5, win_type="boxcar", center=True).std()) + + # sum + xp = np.array( + [ + [np.nan, np.nan], + [np.nan, np.nan], + [46.26, 46.96], + [43.22, 49.53], + [44.35, 51.04], + [34.05, 42.94], + [38.96, 43.22], + [45.25, 39.12], + [np.nan, np.nan], + [np.nan, np.nan], + ] + ) + + rs = DataFrame(vals).rolling(5, win_type="boxcar", center=True).sum() + tm.assert_frame_equal(DataFrame(xp), rs) + + @td.skip_if_no_scipy + def test_cmov_window_na_min_periods(self): + # min_periods + vals = Series(np.random.randn(10)) + vals[4] = np.nan + vals[8] = np.nan + + xp = vals.rolling(5, min_periods=4, center=True).mean() + rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean() + tm.assert_series_equal(xp, rs) + + @td.skip_if_no_scipy + def test_cmov_window_regular(self, win_types): + # GH 8238 + vals = np.array( + [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] + ) + xps = { + "hamming": [ + np.nan, + np.nan, + 8.71384, + 9.56348, + 12.38009, + 14.03687, + 13.8567, + 11.81473, + np.nan, + np.nan, + ], + "triang": [ + np.nan, + np.nan, + 9.28667, + 10.34667, + 12.00556, + 13.33889, + 13.38, + 12.33667, + np.nan, + np.nan, + ], + "barthann": [ + np.nan, + np.nan, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 14.0825, + 11.5675, + np.nan, + np.nan, + ], + "bohman": [ + np.nan, + np.nan, + 7.61599, + 9.1764, + 12.83559, + 14.17267, + 14.65923, + 11.10401, + np.nan, + np.nan, + ], + "blackmanharris": [ + np.nan, 
+ np.nan, + 6.97691, + 9.16438, + 13.05052, + 14.02156, + 15.10512, + 10.74574, + np.nan, + np.nan, + ], + "nuttall": [ + np.nan, + np.nan, + 7.04618, + 9.16786, + 13.02671, + 14.03559, + 15.05657, + 10.78514, + np.nan, + np.nan, + ], + "blackman": [ + np.nan, + np.nan, + 7.73345, + 9.17869, + 12.79607, + 14.20036, + 14.57726, + 11.16988, + np.nan, + np.nan, + ], + "bartlett": [ + np.nan, + np.nan, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 14.0825, + 11.5675, + np.nan, + np.nan, + ], + } + + xp = Series(xps[win_types]) + rs = Series(vals).rolling(5, win_type=win_types, center=True).mean() + tm.assert_series_equal(xp, rs) + + @td.skip_if_no_scipy + def test_cmov_window_regular_linear_range(self, win_types): + # GH 8238 + vals = np.array(range(10), dtype=np.float) + xp = vals.copy() + xp[:2] = np.nan + xp[-2:] = np.nan + xp = Series(xp) + + rs = Series(vals).rolling(5, win_type=win_types, center=True).mean() + tm.assert_series_equal(xp, rs) + + @td.skip_if_no_scipy + def test_cmov_window_regular_missing_data(self, win_types): + # GH 8238 + vals = np.array( + [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48] + ) + xps = { + "bartlett": [ + np.nan, + np.nan, + 9.70333, + 10.5225, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 15.61667, + 13.655, + ], + "blackman": [ + np.nan, + np.nan, + 9.04582, + 11.41536, + 7.73345, + 9.17869, + 12.79607, + 14.20036, + 15.8706, + 13.655, + ], + "barthann": [ + np.nan, + np.nan, + 9.70333, + 10.5225, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 15.61667, + 13.655, + ], + "bohman": [ + np.nan, + np.nan, + 8.9444, + 11.56327, + 7.61599, + 9.1764, + 12.83559, + 14.17267, + 15.90976, + 13.655, + ], + "hamming": [ + np.nan, + np.nan, + 9.59321, + 10.29694, + 8.71384, + 9.56348, + 12.38009, + 14.20565, + 15.24694, + 13.69758, + ], + "nuttall": [ + np.nan, + np.nan, + 8.47693, + 12.2821, + 7.04618, + 9.16786, + 13.02671, + 14.03673, + 16.08759, + 13.65553, + ], + "triang": [ + np.nan, + np.nan, + 9.33167, + 9.76125, + 9.28667, + 10.34667, + 12.00556, + 13.82125, + 14.49429, + 13.765, + ], + "blackmanharris": [ + np.nan, + np.nan, + 8.42526, + 12.36824, + 6.97691, + 9.16438, + 13.05052, + 14.02175, + 16.1098, + 13.65509, + ], + } + + xp = Series(xps[win_types]) + rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean() + tm.assert_series_equal(xp, rs) + + @td.skip_if_no_scipy + def test_cmov_window_special(self, win_types_special): + # GH 8238 + kwds = { + "kaiser": {"beta": 1.0}, + "gaussian": {"std": 1.0}, + "general_gaussian": {"power": 2.0, "width": 2.0}, + "exponential": {"tau": 10}, + } + + vals = np.array( + [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] + ) + + xps = { + "gaussian": [ + np.nan, + np.nan, + 8.97297, + 9.76077, + 12.24763, + 13.89053, + 13.65671, + 12.01002, + np.nan, + np.nan, + ], + "general_gaussian": [ + np.nan, + np.nan, + 9.85011, + 10.71589, + 11.73161, + 13.08516, + 12.95111, + 12.74577, + np.nan, + np.nan, + ], + "kaiser": [ + np.nan, + np.nan, + 9.86851, + 11.02969, + 11.65161, + 12.75129, + 12.90702, + 12.83757, + np.nan, + np.nan, + ], + "exponential": [ + np.nan, + np.nan, + 9.83364, + 11.10472, + 11.64551, + 12.66138, + 12.92379, + 12.83770, + np.nan, + np.nan, + ], + } + + xp = Series(xps[win_types_special]) + rs = ( + Series(vals) + .rolling(5, win_type=win_types_special, center=True) + .mean(**kwds[win_types_special]) + ) + tm.assert_series_equal(xp, rs) + + @td.skip_if_no_scipy + def test_cmov_window_special_linear_range(self, win_types_special): + # GH 8238 + kwds = { + 
"kaiser": {"beta": 1.0}, + "gaussian": {"std": 1.0}, + "general_gaussian": {"power": 2.0, "width": 2.0}, + "slepian": {"width": 0.5}, + "exponential": {"tau": 10}, + } + + vals = np.array(range(10), dtype=np.float) + xp = vals.copy() + xp[:2] = np.nan + xp[-2:] = np.nan + xp = Series(xp) + + rs = ( + Series(vals) + .rolling(5, win_type=win_types_special, center=True) + .mean(**kwds[win_types_special]) + ) + tm.assert_series_equal(xp, rs) + + def test_rolling_median(self, raw): + self._check_moment_func(np.median, name="median", raw=raw) + + def test_rolling_min(self, raw): + self._check_moment_func(np.min, name="min", raw=raw) + + a = pd.Series([1, 2, 3, 4, 5]) + result = a.rolling(window=100, min_periods=1).min() + expected = pd.Series(np.ones(len(a))) + tm.assert_series_equal(result, expected) + + with pytest.raises(ValueError): + pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min() + + def test_rolling_max(self, raw): + self._check_moment_func(np.max, name="max", raw=raw) + + a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64) + b = a.rolling(window=100, min_periods=1).max() + tm.assert_almost_equal(a, b) + + with pytest.raises(ValueError): + pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max() + + @pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) + def test_rolling_quantile(self, q, raw): + def scoreatpercentile(a, per): + values = np.sort(a, axis=0) + + idx = int(per / 1.0 * (values.shape[0] - 1)) + + if idx == values.shape[0] - 1: + retval = values[-1] + + else: + qlow = float(idx) / float(values.shape[0] - 1) + qhig = float(idx + 1) / float(values.shape[0] - 1) + vlow = values[idx] + vhig = values[idx + 1] + retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow) + + return retval + + def quantile_func(x): + return scoreatpercentile(x, q) + + self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw) + + def test_rolling_quantile_np_percentile(self): + # #9413: Tests that rolling window's quantile default behavior + # is analogous to Numpy's percentile + row = 10 + col = 5 + idx = pd.date_range("20100101", periods=row, freq="B") + df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx) + + df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0) + np_percentile = np.percentile(df, [25, 50, 75], axis=0) + + tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) + + @pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1]) + @pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] + ) + @pytest.mark.parametrize( + "data", + [ + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], + [8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0], + [0.0, np.nan, 0.2, np.nan, 0.4], + [np.nan, np.nan, np.nan, np.nan], + [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5], + [0.5], + [np.nan, 0.7, 0.6], + ], + ) + def test_rolling_quantile_interpolation_options( + self, quantile, interpolation, data + ): + # Tests that rolling window's quantile behavior is analogous to + # Series' quantile for each interpolation option + s = Series(data) + + q1 = s.quantile(quantile, interpolation) + q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1] + + if np.isnan(q1): + assert np.isnan(q2) + else: + assert q1 == q2 + + def test_invalid_quantile_value(self): + data = np.arange(5) + s = Series(data) + + msg = "Interpolation 'invalid' is not supported" + with pytest.raises(ValueError, match=msg): + s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid") + + def test_rolling_quantile_param(self): + ser = 
Series([0.0, 0.1, 0.5, 0.9, 1.0]) + + with pytest.raises(ValueError): + ser.rolling(3).quantile(-0.1) + + with pytest.raises(ValueError): + ser.rolling(3).quantile(10.0) + + with pytest.raises(TypeError): + ser.rolling(3).quantile("foo") + + def test_rolling_apply(self, raw): + # suppress warnings about empty slices, as we are deliberately testing + # with a 0-length Series + + def f(x): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".*(empty slice|0 for slice).*", + category=RuntimeWarning, + ) + return x[np.isfinite(x)].mean() + + self._check_moment_func(np.mean, name="apply", func=f, raw=raw) + + expected = Series([]) + result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw) + tm.assert_series_equal(result, expected) + + # gh-8080 + s = Series([None, None, None]) + result = s.rolling(2, min_periods=0).apply(lambda x: len(x), raw=raw) + expected = Series([1.0, 2.0, 2.0]) + tm.assert_series_equal(result, expected) + + result = s.rolling(2, min_periods=0).apply(len, raw=raw) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("klass", [Series, DataFrame]) + @pytest.mark.parametrize( + "method", [lambda x: x.rolling(window=2), lambda x: x.expanding()] + ) + def test_apply_future_warning(self, klass, method): + + # gh-5071 + s = klass(np.arange(3)) + + with tm.assert_produces_warning(FutureWarning): + method(s).apply(lambda x: len(x)) + + def test_rolling_apply_out_of_bounds(self, raw): + # gh-1850 + vals = pd.Series([1, 2, 3, 4]) + + result = vals.rolling(10).apply(np.sum, raw=raw) + assert result.isna().all() + + result = vals.rolling(10, min_periods=1).apply(np.sum, raw=raw) + expected = pd.Series([1, 3, 6, 10], dtype=float) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("window", [2, "2s"]) + def test_rolling_apply_with_pandas_objects(self, window): + # 5071 + df = pd.DataFrame( + {"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)}, + index=pd.date_range("20130101", periods=5, freq="s"), + ) + + # we have an equal spaced timeseries index + # so simulate removing the first period + def f(x): + if x.index[0] == df.index[0]: + return np.nan + return x.iloc[-1] + + result = df.rolling(window).apply(f, raw=False) + expected = df.iloc[2:].reindex_like(df) + tm.assert_frame_equal(result, expected) + + with pytest.raises(AttributeError): + df.rolling(window).apply(f, raw=True) + + def test_rolling_std(self, raw): + self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw) + self._check_moment_func( + lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw + ) + + def test_rolling_std_1obs(self): + vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0]) + + result = vals.rolling(1, min_periods=1).std() + expected = pd.Series([np.nan] * 5) + tm.assert_series_equal(result, expected) + + result = vals.rolling(1, min_periods=1).std(ddof=0) + expected = pd.Series([0.0] * 5) + tm.assert_series_equal(result, expected) + + result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std() + assert np.isnan(result[2]) + + def test_rolling_std_neg_sqrt(self): + # unit test from Bottleneck + + # Test move_nanstd for neg sqrt. 
+ + a = pd.Series( + [ + 0.0011448196318903589, + 0.00028718669878572767, + 0.00028718669878572767, + 0.00028718669878572767, + 0.00028718669878572767, + ] + ) + b = a.rolling(window=3).std() + assert np.isfinite(b[2:]).all() + + b = a.ewm(span=3).std() + assert np.isfinite(b[2:]).all() + + def test_rolling_var(self, raw): + self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw) + self._check_moment_func( + lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw + ) + + @td.skip_if_no_scipy + def test_rolling_skew(self, raw): + from scipy.stats import skew + + self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw) + + @td.skip_if_no_scipy + def test_rolling_kurt(self, raw): + from scipy.stats import kurtosis + + self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw) + + def _check_moment_func( + self, + static_comp, + name, + raw, + has_min_periods=True, + has_center=True, + has_time_rule=True, + fill_value=None, + zero_min_periods_equal=True, + **kwargs + ): + + # inject raw + if name == "apply": + kwargs = copy.copy(kwargs) + kwargs["raw"] = raw + + def get_result(obj, window, min_periods=None, center=False): + r = obj.rolling(window=window, min_periods=min_periods, center=center) + return getattr(r, name)(**kwargs) + + series_result = get_result(self.series, window=50) + assert isinstance(series_result, Series) + tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:])) + + frame_result = get_result(self.frame, window=50) + assert isinstance(frame_result, DataFrame) + tm.assert_series_equal( + frame_result.iloc[-1, :], + self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw), + check_names=False, + ) + + # check time_rule works + if has_time_rule: + win = 25 + minp = 10 + series = self.series[::2].resample("B").mean() + frame = self.frame[::2].resample("B").mean() + + if has_min_periods: + series_result = get_result(series, window=win, min_periods=minp) + frame_result = get_result(frame, window=win, min_periods=minp) + else: + series_result = get_result(series, window=win) + frame_result = get_result(frame, window=win) + + last_date = series_result.index[-1] + prev_date = last_date - 24 * offsets.BDay() + + trunc_series = self.series[::2].truncate(prev_date, last_date) + trunc_frame = self.frame[::2].truncate(prev_date, last_date) + + tm.assert_almost_equal(series_result[-1], static_comp(trunc_series)) + + tm.assert_series_equal( + frame_result.xs(last_date), + trunc_frame.apply(static_comp, raw=raw), + check_names=False, + ) + + # excluding NaNs correctly + obj = Series(randn(50)) + obj[:10] = np.NaN + obj[-10:] = np.NaN + if has_min_periods: + result = get_result(obj, 50, min_periods=30) + tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10])) + + # min_periods is working correctly + result = get_result(obj, 20, min_periods=15) + assert isna(result.iloc[23]) + assert not isna(result.iloc[24]) + + assert not isna(result.iloc[-6]) + assert isna(result.iloc[-5]) + + obj2 = Series(randn(20)) + result = get_result(obj2, 10, min_periods=5) + assert isna(result.iloc[3]) + assert notna(result.iloc[4]) + + if zero_min_periods_equal: + # min_periods=0 may be equivalent to min_periods=1 + result0 = get_result(obj, 20, min_periods=0) + result1 = get_result(obj, 20, min_periods=1) + tm.assert_almost_equal(result0, result1) + else: + result = get_result(obj, 50) + tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10])) + + # window larger than series length (#7297) + if 
has_min_periods: + for minp in (0, len(self.series) - 1, len(self.series)): + result = get_result(self.series, len(self.series) + 1, min_periods=minp) + expected = get_result(self.series, len(self.series), min_periods=minp) + nan_mask = isna(result) + tm.assert_series_equal(nan_mask, isna(expected)) + + nan_mask = ~nan_mask + tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) + else: + result = get_result(self.series, len(self.series) + 1) + expected = get_result(self.series, len(self.series)) + nan_mask = isna(result) + tm.assert_series_equal(nan_mask, isna(expected)) + + nan_mask = ~nan_mask + tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) + + # check center=True + if has_center: + if has_min_periods: + result = get_result(obj, 20, min_periods=15, center=True) + expected = get_result( + pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15 + )[9:].reset_index(drop=True) + else: + result = get_result(obj, 20, center=True) + expected = get_result(pd.concat([obj, Series([np.NaN] * 9)]), 20)[ + 9: + ].reset_index(drop=True) + + tm.assert_series_equal(result, expected) + + # shifter index + s = ["x{x:d}".format(x=x) for x in range(12)] + + if has_min_periods: + minp = 10 + + series_xp = ( + get_result( + self.series.reindex(list(self.series.index) + s), + window=25, + min_periods=minp, + ) + .shift(-12) + .reindex(self.series.index) + ) + frame_xp = ( + get_result( + self.frame.reindex(list(self.frame.index) + s), + window=25, + min_periods=minp, + ) + .shift(-12) + .reindex(self.frame.index) + ) + + series_rs = get_result( + self.series, window=25, min_periods=minp, center=True + ) + frame_rs = get_result( + self.frame, window=25, min_periods=minp, center=True + ) + + else: + series_xp = ( + get_result( + self.series.reindex(list(self.series.index) + s), window=25 + ) + .shift(-12) + .reindex(self.series.index) + ) + frame_xp = ( + get_result( + self.frame.reindex(list(self.frame.index) + s), window=25 + ) + .shift(-12) + .reindex(self.frame.index) + ) + + series_rs = get_result(self.series, window=25, center=True) + frame_rs = get_result(self.frame, window=25, center=True) + + if fill_value is not None: + series_xp = series_xp.fillna(fill_value) + frame_xp = frame_xp.fillna(fill_value) + tm.assert_series_equal(series_xp, series_rs) + tm.assert_frame_equal(frame_xp, frame_rs) + + def test_ewma(self): + self._check_ew(name="mean") + + vals = pd.Series(np.zeros(1000)) + vals[5] = 1 + result = vals.ewm(span=100, adjust=False).mean().sum() + assert np.abs(result - 1) < 1e-2 + + @pytest.mark.parametrize("adjust", [True, False]) + @pytest.mark.parametrize("ignore_na", [True, False]) + def test_ewma_cases(self, adjust, ignore_na): + # try adjust/ignore_na args matrix + + s = Series([1.0, 2.0, 4.0, 8.0]) + + if adjust: + expected = Series([1.0, 1.6, 2.736842, 4.923077]) + else: + expected = Series([1.0, 1.333333, 2.222222, 4.148148]) + + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + tm.assert_series_equal(result, expected) + + def test_ewma_nan_handling(self): + s = Series([1.0] + [np.nan] * 5 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([1.0] * len(s))) + + s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) + + # GH 7603 + s0 = Series([np.nan, 1.0, 101.0]) + s1 = Series([1.0, np.nan, 101.0]) + s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]) + s3 = Series([1.0, np.nan, 101.0, 50.0]) + com = 2.0 + alpha 
= 1.0 / (1.0 + com) + + def simple_wma(s, w): + return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill") + + for (s, adjust, ignore_na, w) in [ + (s0, True, False, [np.nan, (1.0 - alpha), 1.0]), + (s0, True, True, [np.nan, (1.0 - alpha), 1.0]), + (s0, False, False, [np.nan, (1.0 - alpha), alpha]), + (s0, False, True, [np.nan, (1.0 - alpha), alpha]), + (s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]), + (s1, True, True, [(1.0 - alpha), np.nan, 1.0]), + (s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]), + (s1, False, True, [(1.0 - alpha), np.nan, alpha]), + ( + s2, + True, + False, + [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan], + ), + (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]), + ( + s2, + False, + False, + [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan], + ), + (s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]), + (s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]), + (s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]), + ( + s3, + False, + False, + [ + (1.0 - alpha) ** 3, + np.nan, + (1.0 - alpha) * alpha, + alpha * ((1.0 - alpha) ** 2 + alpha), + ], + ), + ( + s3, + False, + True, + [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha], + ), + ]: + expected = simple_wma(s, Series(w)) + result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean() + + tm.assert_series_equal(result, expected) + if ignore_na is False: + # check that ignore_na defaults to False + result = s.ewm(com=com, adjust=adjust).mean() + tm.assert_series_equal(result, expected) + + def test_ewmvar(self): + self._check_ew(name="var") + + def test_ewmvol(self): + self._check_ew(name="vol") + + def test_ewma_span_com_args(self): + A = self.series.ewm(com=9.5).mean() + B = self.series.ewm(span=20).mean() + tm.assert_almost_equal(A, B) + + with pytest.raises(ValueError): + self.series.ewm(com=9.5, span=20) + with pytest.raises(ValueError): + self.series.ewm().mean() + + def test_ewma_halflife_arg(self): + A = self.series.ewm(com=13.932726172912965).mean() + B = self.series.ewm(halflife=10.0).mean() + tm.assert_almost_equal(A, B) + + with pytest.raises(ValueError): + self.series.ewm(span=20, halflife=50) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, halflife=50) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, span=20, halflife=50) + with pytest.raises(ValueError): + self.series.ewm() + + def test_ewm_alpha(self): + # GH 10789 + s = Series(self.arr) + a = s.ewm(alpha=0.61722699889169674).mean() + b = s.ewm(com=0.62014947789973052).mean() + c = s.ewm(span=2.240298955799461).mean() + d = s.ewm(halflife=0.721792864318).mean() + tm.assert_series_equal(a, b) + tm.assert_series_equal(a, c) + tm.assert_series_equal(a, d) + + def test_ewm_alpha_arg(self): + # GH 10789 + s = self.series + with pytest.raises(ValueError): + s.ewm() + with pytest.raises(ValueError): + s.ewm(com=10.0, alpha=0.5) + with pytest.raises(ValueError): + s.ewm(span=10.0, alpha=0.5) + with pytest.raises(ValueError): + s.ewm(halflife=10.0, alpha=0.5) + + def test_ewm_domain_checks(self): + # GH 12492 + s = Series(self.arr) + msg = "comass must satisfy: comass >= 0" + with pytest.raises(ValueError, match=msg): + s.ewm(com=-0.1) + s.ewm(com=0.0) + s.ewm(com=0.1) + + msg = "span must satisfy: span >= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(span=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.0) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.9) + 
s.ewm(span=1.0) + s.ewm(span=1.1) + + msg = "halflife must satisfy: halflife > 0" + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=0.0) + s.ewm(halflife=0.1) + + msg = "alpha must satisfy: 0 < alpha <= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=0.0) + s.ewm(alpha=0.1) + s.ewm(alpha=1.0) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=1.1) + + @pytest.mark.parametrize("method", ["mean", "vol", "var"]) + def test_ew_empty_series(self, method): + vals = pd.Series([], dtype=np.float64) + + ewm = vals.ewm(3) + result = getattr(ewm, method)() + tm.assert_almost_equal(result, vals) + + def _check_ew(self, name=None, preserve_nan=False): + series_result = getattr(self.series.ewm(com=10), name)() + assert isinstance(series_result, Series) + + frame_result = getattr(self.frame.ewm(com=10), name)() + assert type(frame_result) == DataFrame + + result = getattr(self.series.ewm(com=10), name)() + if preserve_nan: + assert result[self._nan_locs].isna().all() + + # excluding NaNs correctly + arr = randn(50) + arr[:10] = np.NaN + arr[-10:] = np.NaN + s = Series(arr) + + # check min_periods + # GH 7898 + result = getattr(s.ewm(com=50, min_periods=2), name)() + assert result[:11].isna().all() + assert not result[11:].isna().any() + + for min_periods in (0, 1): + result = getattr(s.ewm(com=50, min_periods=min_periods), name)() + if name == "mean": + assert result[:10].isna().all() + assert not result[10:].isna().any() + else: + # ewm.std, ewm.vol, ewm.var (with bias=False) require at least + # two values + assert result[:11].isna().all() + assert not result[11:].isna().any() + + # check series of length 0 + result = getattr(Series().ewm(com=50, min_periods=min_periods), name)() + tm.assert_series_equal(result, Series()) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() + if name == "mean": + tm.assert_series_equal(result, Series([1.0])) + else: + # ewm.std, ewm.vol, ewm.var with bias=False require at least + # two values + tm.assert_series_equal(result, Series([np.NaN])) + + # pass in ints + result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() + assert result2.dtype == np.float_ + + +# create the data only once as we are not setting it +def _create_consistency_data(): + def create_series(): + return [ + Series(), + Series([np.nan]), + Series([np.nan, np.nan]), + Series([3.0]), + Series([np.nan, 3.0]), + Series([3.0, np.nan]), + Series([1.0, 3.0]), + Series([2.0, 2.0]), + Series([3.0, 1.0]), + Series( + [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan] + ), + Series( + [ + np.nan, + 5.0, + 5.0, + 5.0, + np.nan, + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + ] + ), + Series( + [ + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + ] + ), + Series( + [ + np.nan, + 3.0, + np.nan, + 3.0, + 4.0, + 5.0, + 6.0, + np.nan, + np.nan, + 7.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + np.nan, + 5.0, + np.nan, + 2.0, + 4.0, + 0.0, + 9.0, + np.nan, + np.nan, + 3.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + 2.0, + 3.0, + np.nan, + 3.0, + 4.0, + 5.0, + 6.0, + np.nan, + np.nan, + 7.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + 2.0, + 5.0, + np.nan, + 2.0, + 4.0, + 0.0, + 9.0, + np.nan, + np.nan, + 3.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series(range(10)), + 
Series(range(20, 0, -2)), + ] + + def create_dataframes(): + return [ + DataFrame(), + DataFrame(columns=["a"]), + DataFrame(columns=["a", "a"]), + DataFrame(columns=["a", "b"]), + DataFrame(np.arange(10).reshape((5, 2))), + DataFrame(np.arange(25).reshape((5, 5))), + DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]), + ] + [DataFrame(s) for s in create_series()] + + def is_constant(x): + values = x.values.ravel() + return len(set(values[notna(values)])) == 1 + + def no_nans(x): + return x.notna().all().all() + + # data is a tuple(object, is_constant, no_nans) + data = create_series() + create_dataframes() + + return [(x, is_constant(x), no_nans(x)) for x in data] + + +_consistency_data = _create_consistency_data() + + +def _rolling_consistency_cases(): + for window in [1, 2, 3, 10, 20]: + for min_periods in {0, 1, 2, 3, 4, window}: + if min_periods and (min_periods > window): + continue + for center in [False, True]: + yield window, min_periods, center + + +class TestMomentsConsistency(Base): + base_functions = [ + (lambda v: Series(v).count(), None, "count"), + (lambda v: Series(v).max(), None, "max"), + (lambda v: Series(v).min(), None, "min"), + (lambda v: Series(v).sum(), None, "sum"), + (lambda v: Series(v).mean(), None, "mean"), + (lambda v: Series(v).std(), 1, "std"), + (lambda v: Series(v).cov(Series(v)), None, "cov"), + (lambda v: Series(v).corr(Series(v)), None, "corr"), + (lambda v: Series(v).var(), 1, "var"), + # restore once GH 8086 is fixed + # lambda v: Series(v).skew(), 3, 'skew'), + # (lambda v: Series(v).kurt(), 4, 'kurt'), + # restore once GH 8084 is fixed + # lambda v: Series(v).quantile(0.3), None, 'quantile'), + (lambda v: Series(v).median(), None, "median"), + (np.nanmax, 1, "max"), + (np.nanmin, 1, "min"), + (np.nansum, 1, "sum"), + (np.nanmean, 1, "mean"), + (lambda v: np.nanstd(v, ddof=1), 1, "std"), + (lambda v: np.nanvar(v, ddof=1), 1, "var"), + (np.nanmedian, 1, "median"), + ] + no_nan_functions = [ + (np.max, None, "max"), + (np.min, None, "min"), + (np.sum, None, "sum"), + (np.mean, None, "mean"), + (lambda v: np.std(v, ddof=1), 1, "std"), + (lambda v: np.var(v, ddof=1), 1, "var"), + (np.median, None, "median"), + ] + + def _create_data(self): + super()._create_data() + self.data = _consistency_data + + def setup_method(self, method): + self._create_data() + + def _test_moments_consistency( + self, + min_periods, + count, + mean, + mock_mean, + corr, + var_unbiased=None, + std_unbiased=None, + cov_unbiased=None, + var_biased=None, + std_biased=None, + cov_biased=None, + var_debiasing_factors=None, + ): + def _non_null_values(x): + values = x.values.ravel() + return set(values[notna(values)].tolist()) + + for (x, is_constant, no_nans) in self.data: + count_x = count(x) + mean_x = mean(x) + + if mock_mean: + # check that mean equals mock_mean + expected = mock_mean(x) + tm.assert_equal(mean_x, expected.astype("float64")) + + # check that correlation of a series with itself is either 1 or NaN + corr_x_x = corr(x, x) + + # assert _non_null_values(corr_x_x).issubset(set([1.])) + # restore once rolling_cov(x, x) is identically equal to var(x) + + if is_constant: + exp = x.max() if isinstance(x, Series) else x.max().max() + + # check mean of constant series + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = exp + tm.assert_equal(mean_x, expected) + + # check correlation of constant series with itself is NaN + expected[:] = np.nan + tm.assert_equal(corr_x_x, expected) + + if var_unbiased and var_biased and 
var_debiasing_factors: + # check variance debiasing factors + var_unbiased_x = var_unbiased(x) + var_biased_x = var_biased(x) + var_debiasing_factors_x = var_debiasing_factors(x) + tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) + + for (std, var, cov) in [ + (std_biased, var_biased, cov_biased), + (std_unbiased, var_unbiased, cov_unbiased), + ]: + + # check that var(x), std(x), and cov(x) are all >= 0 + var_x = var(x) + std_x = std(x) + assert not (var_x < 0).any().any() + assert not (std_x < 0).any().any() + if cov: + cov_x_x = cov(x, x) + assert not (cov_x_x < 0).any().any() + + # check that var(x) == cov(x, x) + tm.assert_equal(var_x, cov_x_x) + + # check that var(x) == std(x)^2 + tm.assert_equal(var_x, std_x * std_x) + + if var is var_biased: + # check that biased var(x) == mean(x^2) - mean(x)^2 + mean_x2 = mean(x * x) + tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) + + if is_constant: + # check that variance of constant series is identically 0 + assert not (var_x > 0).any().any() + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = 0.0 + if var is var_unbiased: + expected[count_x < 2] = np.nan + tm.assert_equal(var_x, expected) + + if isinstance(x, Series): + for (y, is_constant, no_nans) in self.data: + if not x.isna().equals(y.isna()): + # can only easily test two Series with similar + # structure + continue + + # check that cor(x, y) is symmetric + corr_x_y = corr(x, y) + corr_y_x = corr(y, x) + tm.assert_equal(corr_x_y, corr_y_x) + + if cov: + # check that cov(x, y) is symmetric + cov_x_y = cov(x, y) + cov_y_x = cov(y, x) + tm.assert_equal(cov_x_y, cov_y_x) + + # check that cov(x, y) == (var(x+y) - var(x) - + # var(y)) / 2 + var_x_plus_y = var(x + y) + var_y = var(y) + tm.assert_equal( + cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y) + ) + + # check that corr(x, y) == cov(x, y) / (std(x) * + # std(y)) + std_y = std(y) + tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) + + if cov is cov_biased: + # check that biased cov(x, y) == mean(x*y) - + # mean(x)*mean(y) + mean_y = mean(y) + mean_x_times_y = mean(x * y) + tm.assert_equal( + cov_x_y, mean_x_times_y - (mean_x * mean_y) + ) + + @pytest.mark.slow + @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) + @pytest.mark.parametrize("adjust", [True, False]) + @pytest.mark.parametrize("ignore_na", [True, False]) + def test_ewm_consistency(self, min_periods, adjust, ignore_na): + def _weights(s, com, adjust, ignore_na): + if isinstance(s, DataFrame): + if not len(s.columns): + return DataFrame(index=s.index, columns=s.columns) + w = concat( + [ + _weights( + s.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na + ) + for i, _ in enumerate(s.columns) + ], + axis=1, + ) + w.index = s.index + w.columns = s.columns + return w + + w = Series(np.nan, index=s.index) + alpha = 1.0 / (1.0 + com) + if ignore_na: + w[s.notna()] = _weights( + s[s.notna()], com=com, adjust=adjust, ignore_na=False + ) + elif adjust: + for i in range(len(s)): + if s.iat[i] == s.iat[i]: + w.iat[i] = pow(1.0 / (1.0 - alpha), i) + else: + sum_wts = 0.0 + prev_i = -1 + for i in range(len(s)): + if s.iat[i] == s.iat[i]: + if prev_i == -1: + w.iat[i] = 1.0 + else: + w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, i - prev_i) + sum_wts += w.iat[i] + prev_i = i + return w + + def _variance_debiasing_factors(s, com, adjust, ignore_na): + weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) + cum_sum = weights.cumsum().fillna(method="ffill") + cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill") + 
numerator = cum_sum * cum_sum + denominator = numerator - cum_sum_sq + denominator[denominator <= 0.0] = np.nan + return numerator / denominator + + def _ewma(s, com, min_periods, adjust, ignore_na): + weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) + result = ( + s.multiply(weights) + .cumsum() + .divide(weights.cumsum()) + .fillna(method="ffill") + ) + result[ + s.expanding().count() < (max(min_periods, 1) if min_periods else 1) + ] = np.nan + return result + + com = 3.0 + # test consistency between different ewm* moments + self._test_moments_consistency( + min_periods=min_periods, + count=lambda x: x.expanding().count(), + mean=lambda x: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean(), + mock_mean=lambda x: _ewma( + x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ), + corr=lambda x, y: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).corr(y), + var_unbiased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=False) + ), + std_unbiased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).std(bias=False) + ), + cov_unbiased=lambda x, y: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).cov(y, bias=False) + ), + var_biased=lambda x: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=True) + ), + std_biased=lambda x: x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).std(bias=True), + cov_biased=lambda x, y: ( + x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).cov(y, bias=True) + ), + var_debiasing_factors=lambda x: ( + _variance_debiasing_factors( + x, com=com, adjust=adjust, ignore_na=ignore_na + ) + ), + ) + + @pytest.mark.slow + @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) + def test_expanding_consistency(self, min_periods): + + # suppress warnings about empty slices, as we are deliberately testing + # with empty/0-length Series/DataFrames + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".*(empty slice|0 for slice).*", + category=RuntimeWarning, + ) + + # test consistency between different expanding_* moments + self._test_moments_consistency( + min_periods=min_periods, + count=lambda x: x.expanding().count(), + mean=lambda x: x.expanding(min_periods=min_periods).mean(), + mock_mean=lambda x: x.expanding(min_periods=min_periods).sum() + / x.expanding().count(), + corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), + var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), + std_unbiased=lambda x: x.expanding(min_periods=min_periods).std(), + cov_unbiased=lambda x, y: x.expanding(min_periods=min_periods).cov(y), + var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), + std_biased=lambda x: x.expanding(min_periods=min_periods).std(ddof=0), + cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov( + y, ddof=0 + ), + var_debiasing_factors=lambda x: ( + x.expanding().count() + / (x.expanding().count() - 1.0).replace(0.0, np.nan) + ), + ) + + # test consistency between expanding_xyz() and either (a) + # expanding_apply of Series.xyz(), or (b) expanding_apply of + # np.nanxyz() + for (x, is_constant, no_nans) in self.data: + functions = self.base_functions + + # GH 8269 + if no_nans: + functions = self.base_functions + self.no_nan_functions + for 
(f, require_min_periods, name) in functions: + expanding_f = getattr(x.expanding(min_periods=min_periods), name) + + if ( + require_min_periods + and (min_periods is not None) + and (min_periods < require_min_periods) + ): + continue + + if name == "count": + expanding_f_result = expanding_f() + expanding_apply_f_result = x.expanding(min_periods=0).apply( + func=f, raw=True + ) + else: + if name in ["cov", "corr"]: + expanding_f_result = expanding_f(pairwise=False) + else: + expanding_f_result = expanding_f() + expanding_apply_f_result = x.expanding( + min_periods=min_periods + ).apply(func=f, raw=True) + + # GH 9422 + if name in ["sum", "prod"]: + tm.assert_equal(expanding_f_result, expanding_apply_f_result) + + @pytest.mark.slow + @pytest.mark.parametrize( + "window,min_periods,center", list(_rolling_consistency_cases()) + ) + def test_rolling_consistency(self, window, min_periods, center): + + # suppress warnings about empty slices, as we are deliberately testing + # with empty/0-length Series/DataFrames + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".*(empty slice|0 for slice).*", + category=RuntimeWarning, + ) + + # test consistency between different rolling_* moments + self._test_moments_consistency( + min_periods=min_periods, + count=lambda x: (x.rolling(window=window, center=center).count()), + mean=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).mean() + ), + mock_mean=lambda x: ( + x.rolling(window=window, min_periods=min_periods, center=center) + .sum() + .divide( + x.rolling( + window=window, min_periods=min_periods, center=center + ).count() + ) + ), + corr=lambda x, y: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).corr(y) + ), + var_unbiased=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).var() + ), + std_unbiased=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).std() + ), + cov_unbiased=lambda x, y: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).cov(y) + ), + var_biased=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).var(ddof=0) + ), + std_biased=lambda x: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).std(ddof=0) + ), + cov_biased=lambda x, y: ( + x.rolling( + window=window, min_periods=min_periods, center=center + ).cov(y, ddof=0) + ), + var_debiasing_factors=lambda x: ( + x.rolling(window=window, center=center) + .count() + .divide( + (x.rolling(window=window, center=center).count() - 1.0).replace( + 0.0, np.nan + ) + ) + ), + ) + + # test consistency between rolling_xyz() and either (a) + # rolling_apply of Series.xyz(), or (b) rolling_apply of + # np.nanxyz() + for (x, is_constant, no_nans) in self.data: + functions = self.base_functions + + # GH 8269 + if no_nans: + functions = self.base_functions + self.no_nan_functions + for (f, require_min_periods, name) in functions: + rolling_f = getattr( + x.rolling( + window=window, center=center, min_periods=min_periods + ), + name, + ) + + if ( + require_min_periods + and (min_periods is not None) + and (min_periods < require_min_periods) + ): + continue + + if name == "count": + rolling_f_result = rolling_f() + rolling_apply_f_result = x.rolling( + window=window, min_periods=0, center=center + ).apply(func=f, raw=True) + else: + if name in ["cov", "corr"]: + rolling_f_result = rolling_f(pairwise=False) + else: + rolling_f_result = rolling_f() + 
rolling_apply_f_result = x.rolling( + window=window, min_periods=min_periods, center=center + ).apply(func=f, raw=True) + + # GH 9422 + if name in ["sum", "prod"]: + tm.assert_equal(rolling_f_result, rolling_apply_f_result) + + # binary moments + def test_rolling_cov(self): + A = self.series + B = A + randn(len(A)) + + result = A.rolling(window=50, min_periods=25).cov(B) + tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1]) + + def test_rolling_cov_pairwise(self): + self._check_pairwise_moment("rolling", "cov", window=10, min_periods=5) + + def test_rolling_corr(self): + A = self.series + B = A + randn(len(A)) + + result = A.rolling(window=50, min_periods=25).corr(B) + tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1]) + + # test for correct bias correction + a = tm.makeTimeSeries() + b = tm.makeTimeSeries() + a[:5] = np.nan + b[:10] = np.nan + + result = a.rolling(window=len(a), min_periods=1).corr(b) + tm.assert_almost_equal(result[-1], a.corr(b)) + + def test_rolling_corr_pairwise(self): + self._check_pairwise_moment("rolling", "corr", window=10, min_periods=5) + + @pytest.mark.parametrize("window", range(7)) + def test_rolling_corr_with_zero_variance(self, window): + # GH 18430 + s = pd.Series(np.zeros(20)) + other = pd.Series(np.arange(20)) + + assert s.rolling(window=window).corr(other=other).isna().all() + + def _check_pairwise_moment(self, dispatch, name, **kwargs): + def get_result(obj, obj2=None): + return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) + + result = get_result(self.frame) + result = result.loc[(slice(None), 1), 5] + result.index = result.index.droplevel(1) + expected = get_result(self.frame[1], self.frame[5]) + tm.assert_series_equal(result, expected, check_names=False) + + def test_flex_binary_moment(self): + # GH3155 + # don't blow the stack + msg = ( + "arguments to moment function must be of type" + " np.ndarray/Series/DataFrame" + ) + with pytest.raises(TypeError, match=msg): + rwindow._flex_binary_moment(5, 6, None) + + def test_corr_sanity(self): + # GH 3155 + df = DataFrame( + np.array( + [ + [0.87024726, 0.18505595], + [0.64355431, 0.3091617], + [0.92372966, 0.50552513], + [0.00203756, 0.04520709], + [0.84780328, 0.33394331], + [0.78369152, 0.63919667], + ] + ) + ) + + res = df[0].rolling(5, center=True).corr(df[1]) + assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) + + # and some fuzzing + for _ in range(10): + df = DataFrame(np.random.rand(30, 2)) + res = df[0].rolling(5, center=True).corr(df[1]) + try: + assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) + except AssertionError: + print(res) + + @pytest.mark.parametrize("method", ["corr", "cov"]) + def test_flex_binary_frame(self, method): + series = self.frame[1] + + res = getattr(series.rolling(window=10), method)(self.frame) + res2 = getattr(self.frame.rolling(window=10), method)(series) + exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x)) + + tm.assert_frame_equal(res, exp) + tm.assert_frame_equal(res2, exp) + + frame2 = self.frame.copy() + frame2.values[:] = np.random.randn(*frame2.shape) + + res3 = getattr(self.frame.rolling(window=10), method)(frame2) + exp = DataFrame( + { + k: getattr(self.frame[k].rolling(window=10), method)(frame2[k]) + for k in self.frame + } + ) + tm.assert_frame_equal(res3, exp) + + def test_ewmcov(self): + self._check_binary_ew("cov") + + def test_ewmcov_pairwise(self): + self._check_pairwise_moment("ewm", "cov", span=10, min_periods=5) + + def test_ewmcorr(self): + 
self._check_binary_ew("corr") + + def test_ewmcorr_pairwise(self): + self._check_pairwise_moment("ewm", "corr", span=10, min_periods=5) + + def _check_binary_ew(self, name): + def func(A, B, com, **kwargs): + return getattr(A.ewm(com, **kwargs), name)(B) + + A = Series(randn(50), index=np.arange(50)) + B = A[2:] + randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN + + result = func(A, B, 20, min_periods=5) + assert np.isnan(result.values[:14]).all() + assert not np.isnan(result.values[14:]).any() + + # GH 7898 + for min_periods in (0, 1, 2): + result = func(A, B, 20, min_periods=min_periods) + # binary functions (ewmcov, ewmcorr) with bias=False require at + # least two values + assert np.isnan(result.values[:11]).all() + assert not np.isnan(result.values[11:]).any() + + # check series of length 0 + result = func(Series([]), Series([]), 50, min_periods=min_periods) + tm.assert_series_equal(result, Series([])) + + # check series of length 1 + result = func(Series([1.0]), Series([1.0]), 50, min_periods=min_periods) + tm.assert_series_equal(result, Series([np.NaN])) + + msg = "Input arrays must be of the same type!" + # exception raised is Exception + with pytest.raises(Exception, match=msg): + func(A, randn(50), 20, min_periods=5) + + def test_expanding_apply_args_kwargs(self, raw): + def mean_w_arg(x, const): + return np.mean(x) + const + + df = DataFrame(np.random.rand(20, 3)) + + expected = df.expanding().apply(np.mean, raw=raw) + 20.0 + + result = df.expanding().apply(mean_w_arg, raw=raw, args=(20,)) + tm.assert_frame_equal(result, expected) + + result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20}) + tm.assert_frame_equal(result, expected) + + def test_expanding_corr(self): + A = self.series.dropna() + B = (A + randn(len(A)))[:-5] + + result = A.expanding().corr(B) + + rolling_result = A.rolling(window=len(A), min_periods=1).corr(B) + + tm.assert_almost_equal(rolling_result, result) + + def test_expanding_count(self): + result = self.series.expanding().count() + tm.assert_almost_equal( + result, self.series.rolling(window=len(self.series)).count() + ) + + def test_expanding_quantile(self): + result = self.series.expanding().quantile(0.5) + + rolling_result = self.series.rolling( + window=len(self.series), min_periods=1 + ).quantile(0.5) + + tm.assert_almost_equal(result, rolling_result) + + def test_expanding_cov(self): + A = self.series + B = (A + randn(len(A)))[:-5] + + result = A.expanding().cov(B) + + rolling_result = A.rolling(window=len(A), min_periods=1).cov(B) + + tm.assert_almost_equal(rolling_result, result) + + def test_expanding_cov_pairwise(self): + result = self.frame.expanding().corr() + + rolling_result = self.frame.rolling( + window=len(self.frame), min_periods=1 + ).corr() + + tm.assert_frame_equal(result, rolling_result) + + def test_expanding_corr_pairwise(self): + result = self.frame.expanding().corr() + + rolling_result = self.frame.rolling( + window=len(self.frame), min_periods=1 + ).corr() + tm.assert_frame_equal(result, rolling_result) + + def test_expanding_cov_diff_index(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.expanding().cov(s2) + expected = Series([None, None, 2.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.expanding().cov(s2a) + tm.assert_series_equal(result, expected) + + s1 = Series([7, 8, 10], index=[0, 1, 3]) + s2 = Series([7, 9, 10], index=[0, 2, 3]) + result = s1.expanding().cov(s2) + expected = 
Series([None, None, None, 4.5]) + tm.assert_series_equal(result, expected) + + def test_expanding_corr_diff_index(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.expanding().corr(s2) + expected = Series([None, None, 1.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.expanding().corr(s2a) + tm.assert_series_equal(result, expected) + + s1 = Series([7, 8, 10], index=[0, 1, 3]) + s2 = Series([7, 9, 10], index=[0, 2, 3]) + result = s1.expanding().corr(s2) + expected = Series([None, None, None, 1.0]) + tm.assert_series_equal(result, expected) + + def test_rolling_cov_diff_length(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.rolling(window=3, min_periods=2).cov(s2) + expected = Series([None, None, 2.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.rolling(window=3, min_periods=2).cov(s2a) + tm.assert_series_equal(result, expected) + + def test_rolling_corr_diff_length(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.rolling(window=3, min_periods=2).corr(s2) + expected = Series([None, None, 1.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.rolling(window=3, min_periods=2).corr(s2a) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "f", + [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=False)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=False)), + lambda x: x.rolling(window=10, min_periods=5).max(), + lambda x: x.rolling(window=10, min_periods=5).min(), + lambda x: x.rolling(window=10, min_periods=5).sum(), + lambda x: x.rolling(window=10, min_periods=5).mean(), + lambda x: x.rolling(window=10, min_periods=5).std(), + lambda x: x.rolling(window=10, min_periods=5).var(), + lambda x: x.rolling(window=10, min_periods=5).skew(), + lambda x: x.rolling(window=10, min_periods=5).kurt(), + lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5), + lambda x: x.rolling(window=10, min_periods=5).median(), + lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), + lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), + lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), + ], + ) + def test_rolling_functions_window_non_shrinkage(self, f): + # GH 7764 + s = Series(range(4)) + s_expected = Series(np.nan, index=s.index) + df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"]) + df_expected = DataFrame(np.nan, index=df.index, columns=df.columns) + + try: + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) + + df_result = f(df) + tm.assert_frame_equal(df_result, df_expected) + except (ImportError): + + # scipy needed for rolling_window + pytest.skip("scipy not available") + + def test_rolling_functions_window_non_shrinkage_binary(self): + + # corr/cov return a MI DataFrame + df = DataFrame( + [[1, 5], [3, 2], [3, 9], [-1, 0]], + columns=Index(["A", "B"], name="foo"), + index=Index(range(4), name="bar"), + ) + df_expected = DataFrame( + columns=Index(["A", "B"], name="foo"), + index=pd.MultiIndex.from_product( + [df.index, df.columns], names=["bar", "foo"] + ), + dtype="float64", + ) + functions = [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: 
(x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ] + for f in functions: + df_result = f(df) + tm.assert_frame_equal(df_result, df_expected) + + def test_moment_functions_zero_length(self): + # GH 8056 + s = Series() + s_expected = s + df1 = DataFrame() + df1_expected = df1 + df2 = DataFrame(columns=["a"]) + df2["a"] = df2["a"].astype("float64") + df2_expected = df2 + + functions = [ + lambda x: x.expanding().count(), + lambda x: x.expanding(min_periods=5).cov(x, pairwise=False), + lambda x: x.expanding(min_periods=5).corr(x, pairwise=False), + lambda x: x.expanding(min_periods=5).max(), + lambda x: x.expanding(min_periods=5).min(), + lambda x: x.expanding(min_periods=5).sum(), + lambda x: x.expanding(min_periods=5).mean(), + lambda x: x.expanding(min_periods=5).std(), + lambda x: x.expanding(min_periods=5).var(), + lambda x: x.expanding(min_periods=5).skew(), + lambda x: x.expanding(min_periods=5).kurt(), + lambda x: x.expanding(min_periods=5).quantile(0.5), + lambda x: x.expanding(min_periods=5).median(), + lambda x: x.expanding(min_periods=5).apply(sum, raw=False), + lambda x: x.expanding(min_periods=5).apply(sum, raw=True), + lambda x: x.rolling(window=10).count(), + lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False), + lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False), + lambda x: x.rolling(window=10, min_periods=5).max(), + lambda x: x.rolling(window=10, min_periods=5).min(), + lambda x: x.rolling(window=10, min_periods=5).sum(), + lambda x: x.rolling(window=10, min_periods=5).mean(), + lambda x: x.rolling(window=10, min_periods=5).std(), + lambda x: x.rolling(window=10, min_periods=5).var(), + lambda x: x.rolling(window=10, min_periods=5).skew(), + lambda x: x.rolling(window=10, min_periods=5).kurt(), + lambda x: x.rolling(window=10, min_periods=5).quantile(0.5), + lambda x: x.rolling(window=10, min_periods=5).median(), + lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), + lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), + lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), + ] + for f in functions: + try: + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) + + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) + except (ImportError): + + # scipy needed for rolling_window + continue + + def test_moment_functions_zero_length_pairwise(self): + + df1 = DataFrame() + df1_expected = df1 + df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) + df2["a"] = df2["a"].astype("float64") + + df1_expected = DataFrame( + index=pd.MultiIndex.from_product([df1.index, df1.columns]), + columns=Index([]), + ) + df2_expected = DataFrame( + index=pd.MultiIndex.from_product( + [df2.index, df2.columns], names=["bar", "foo"] + ), + columns=Index(["a"], name="foo"), + dtype="float64", + ) + + functions = [ + lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)), + lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ] + for f in functions: + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) + + def test_expanding_cov_pairwise_diff_length(self): + # GH 7512 + df1 = DataFrame([[1, 5], [3, 2], [3, 9]], 
columns=Index(["A", "B"], name="foo")) + df1a = DataFrame( + [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo") + ) + df2 = DataFrame( + [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo") + ) + df2a = DataFrame( + [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo") + ) + # TODO: xref gh-15826 + # .loc is not preserving the names + result1 = df1.expanding().cov(df2a, pairwise=True).loc[2] + result2 = df1.expanding().cov(df2a, pairwise=True).loc[2] + result3 = df1a.expanding().cov(df2, pairwise=True).loc[2] + result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2] + expected = DataFrame( + [[-3.0, -6.0], [-5.0, -10.0]], + columns=Index(["A", "B"], name="foo"), + index=Index(["X", "Y"], name="foo"), + ) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected) + tm.assert_frame_equal(result4, expected) + + def test_expanding_corr_pairwise_diff_length(self): + # GH 7512 + df1 = DataFrame( + [[1, 2], [3, 2], [3, 4]], + columns=["A", "B"], + index=Index(range(3), name="bar"), + ) + df1a = DataFrame( + [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"] + ) + df2 = DataFrame( + [[5, 6], [None, None], [2, 1]], + columns=["X", "Y"], + index=Index(range(3), name="bar"), + ) + df2a = DataFrame( + [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"] + ) + result1 = df1.expanding().corr(df2, pairwise=True).loc[2] + result2 = df1.expanding().corr(df2a, pairwise=True).loc[2] + result3 = df1a.expanding().corr(df2, pairwise=True).loc[2] + result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2] + expected = DataFrame( + [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"]) + ) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected) + tm.assert_frame_equal(result4, expected) + + def test_rolling_skew_edge_cases(self): + + all_nan = Series([np.NaN] * 5) + + # yields all NaN (0 variance) + d = Series([1] * 5) + x = d.rolling(window=5).skew() + tm.assert_series_equal(all_nan, x) + + # yields all NaN (window too small) + d = Series(np.random.randn(5)) + x = d.rolling(window=2).skew() + tm.assert_series_equal(all_nan, x) + + # yields [NaN, NaN, NaN, 0.177994, 1.548824] + d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401]) + expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824]) + x = d.rolling(window=4).skew() + tm.assert_series_equal(expected, x) + + def test_rolling_kurt_edge_cases(self): + + all_nan = Series([np.NaN] * 5) + + # yields all NaN (0 variance) + d = Series([1] * 5) + x = d.rolling(window=5).kurt() + tm.assert_series_equal(all_nan, x) + + # yields all NaN (window too small) + d = Series(np.random.randn(5)) + x = d.rolling(window=3).kurt() + tm.assert_series_equal(all_nan, x) + + # yields [NaN, NaN, NaN, 1.224307, 2.671499] + d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401]) + expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499]) + x = d.rolling(window=4).kurt() + tm.assert_series_equal(expected, x) + + def test_rolling_skew_eq_value_fperr(self): + # #18804 all rolling skew for all equal values should return Nan + a = Series([1.1] * 15).rolling(window=10).skew() + assert np.isnan(a).all() + + def test_rolling_kurt_eq_value_fperr(self): + # #18804 all rolling kurt for all equal values should return Nan + a = Series([1.1] * 15).rolling(window=10).kurt() + assert np.isnan(a).all() + 
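For context on the two `*_eq_value_fperr` tests above: a rolling window of identical values has exactly zero variance, so the standardized third and fourth moments are undefined, and pandas is expected to return NaN rather than a tiny nonzero artifact of floating-point round-off (GH 18804). A minimal standalone sketch of that guarantee, assuming a pandas version that includes the GH 18804 fix:

```python
# Illustrates the behavior the fperr tests pin down: with all-equal inputs the
# rolling variance is exactly zero, so skew/kurt must come back as NaN rather
# than a round-off-driven nonzero value.
import pandas as pd

s = pd.Series([1.1] * 15)
assert s.rolling(window=10).skew().isna().all()
assert s.rolling(window=10).kurt().isna().all()
```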
+ @pytest.mark.parametrize( + "func,static_comp", + [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], + ids=["sum", "mean", "max", "min"], + ) + def test_expanding_func(self, func, static_comp): + def expanding_func(x, min_periods=1, center=False, axis=0): + exp = x.expanding(min_periods=min_periods, center=center, axis=axis) + return getattr(exp, func)() + + self._check_expanding(expanding_func, static_comp, preserve_nan=False) + + def test_expanding_apply(self, raw): + def expanding_mean(x, min_periods=1): + + exp = x.expanding(min_periods=min_periods) + result = exp.apply(lambda x: x.mean(), raw=raw) + return result + + # TODO(jreback), needed to add preserve_nan=False + # here to make this pass + self._check_expanding(expanding_mean, np.mean, preserve_nan=False) + + ser = Series([]) + tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean(), raw=raw)) + + # GH 8080 + s = Series([None, None, None]) + result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw) + expected = Series([1.0, 2.0, 3.0]) + tm.assert_series_equal(result, expected) + + def _check_expanding( + self, + func, + static_comp, + has_min_periods=True, + has_time_rule=True, + preserve_nan=True, + ): + + series_result = func(self.series) + assert isinstance(series_result, Series) + frame_result = func(self.frame) + assert isinstance(frame_result, DataFrame) + + result = func(self.series) + tm.assert_almost_equal(result[10], static_comp(self.series[:11])) + + if preserve_nan: + assert result.iloc[self._nan_locs].isna().all() + + ser = Series(randn(50)) + + if has_min_periods: + result = func(ser, min_periods=30) + assert result[:29].isna().all() + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) + + # min_periods is working correctly + result = func(ser, min_periods=15) + assert isna(result.iloc[13]) + assert notna(result.iloc[14]) + + ser2 = Series(randn(20)) + result = func(ser2, min_periods=5) + assert isna(result[3]) + assert notna(result[4]) + + # min_periods=0 + result0 = func(ser, min_periods=0) + result1 = func(ser, min_periods=1) + tm.assert_almost_equal(result0, result1) + else: + result = func(ser) + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) + + def test_rolling_max_gh6297(self): + """Replicate result expected in GH #6297""" + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 2 datapoints on one of the days + indices.append(datetime(1975, 1, 3, 6, 0)) + series = Series(range(1, 7), index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + expected = Series( + [1.0, 2.0, 6.0, 4.0, 5.0], + index=[datetime(1975, 1, i, 0) for i in range(1, 6)], + ) + x = series.resample("D").max().rolling(window=1).max() + tm.assert_series_equal(expected, x) + + def test_rolling_max_resample(self): + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 3 datapoints on last day (4, 10, and 20) + indices.append(datetime(1975, 1, 5, 1)) + indices.append(datetime(1975, 1, 5, 2)) + series = Series(list(range(0, 5)) + [10, 20], index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + # Default how should be max + expected = Series( + [0.0, 1.0, 2.0, 3.0, 20.0], + index=[datetime(1975, 1, i, 0) for i in range(1, 6)], + ) + x = series.resample("D").max().rolling(window=1).max() + tm.assert_series_equal(expected, 
x) + + # Now specify median (10.0) + expected = Series( + [0.0, 1.0, 2.0, 3.0, 10.0], + index=[datetime(1975, 1, i, 0) for i in range(1, 6)], + ) + x = series.resample("D").median().rolling(window=1).max() + tm.assert_series_equal(expected, x) + + # Now specify mean (4+10+20)/3 + v = (4.0 + 10.0 + 20.0) / 3.0 + expected = Series( + [0.0, 1.0, 2.0, 3.0, v], + index=[datetime(1975, 1, i, 0) for i in range(1, 6)], + ) + x = series.resample("D").mean().rolling(window=1).max() + tm.assert_series_equal(expected, x) + + def test_rolling_min_resample(self): + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 3 datapoints on last day (4, 10, and 20) + indices.append(datetime(1975, 1, 5, 1)) + indices.append(datetime(1975, 1, 5, 2)) + series = Series(list(range(0, 5)) + [10, 20], index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + # Default how should be min + expected = Series( + [0.0, 1.0, 2.0, 3.0, 4.0], + index=[datetime(1975, 1, i, 0) for i in range(1, 6)], + ) + r = series.resample("D").min().rolling(window=1) + tm.assert_series_equal(expected, r.min()) + + def test_rolling_median_resample(self): + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 3 datapoints on last day (4, 10, and 20) + indices.append(datetime(1975, 1, 5, 1)) + indices.append(datetime(1975, 1, 5, 2)) + series = Series(list(range(0, 5)) + [10, 20], index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + # Default how should be median + expected = Series( + [0.0, 1.0, 2.0, 3.0, 10], + index=[datetime(1975, 1, i, 0) for i in range(1, 6)], + ) + x = series.resample("D").median().rolling(window=1).median() + tm.assert_series_equal(expected, x) + + def test_rolling_median_memory_error(self): + # GH11722 + n = 20000 + Series(np.random.randn(n)).rolling(window=2, center=False).median() + Series(np.random.randn(n)).rolling(window=2, center=False).median() + + def test_rolling_min_max_numeric_types(self): + + # GH12373 + types_test = [np.dtype("f{}".format(width)) for width in [4, 8]] + types_test.extend( + [ + np.dtype("{}{}".format(sign, width)) + for width in [1, 2, 4, 8] + for sign in "ui" + ] + ) + for data_type in types_test: + # Just testing that these don't throw exceptions and that + # the return type is float64. 
Other tests will cover quantitative + # correctness + result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max() + assert result.dtypes[0] == np.dtype("f8") + result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min() + assert result.dtypes[0] == np.dtype("f8") diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py new file mode 100644 index 0000000000000..c7177e1d3914f --- /dev/null +++ b/pandas/tests/window/test_rolling.py @@ -0,0 +1,328 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas.errors import UnsupportedFunctionCall +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import DataFrame, Series +import pandas.core.window as rwindow +from pandas.tests.window.common import Base +import pandas.util.testing as tm + + +class TestRolling(Base): + def setup_method(self, method): + self._create_data() + + def test_doc_string(self): + + df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) + df + df.rolling(2).sum() + df.rolling(2, min_periods=1).sum() + + @pytest.mark.parametrize("which", ["series", "frame"]) + def test_constructor(self, which): + # GH 12669 + + o = getattr(self, which) + c = o.rolling + + # valid + c(window=2) + c(window=2, min_periods=1) + c(window=2, min_periods=1, center=True) + c(window=2, min_periods=1, center=False) + + # GH 13383 + with pytest.raises(ValueError): + c(0) + c(-1) + + # not valid + for w in [2.0, "foo", np.array([2])]: + with pytest.raises(ValueError): + c(window=w) + with pytest.raises(ValueError): + c(window=2, min_periods=w) + with pytest.raises(ValueError): + c(window=2, min_periods=1, center=w) + + @td.skip_if_no_scipy + @pytest.mark.parametrize("which", ["series", "frame"]) + def test_constructor_with_win_type(self, which): + # GH 13383 + o = getattr(self, which) + c = o.rolling + with pytest.raises(ValueError): + c(-1, win_type="boxcar") + + @pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3)]) + def test_constructor_with_timedelta_window(self, window): + # GH 15440 + n = 10 + df = DataFrame( + {"value": np.arange(n)}, + index=pd.date_range("2015-12-24", periods=n, freq="D"), + ) + expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3)) + + result = df.rolling(window=window).sum() + expected = DataFrame( + {"value": expected_data}, + index=pd.date_range("2015-12-24", periods=n, freq="D"), + ) + tm.assert_frame_equal(result, expected) + expected = df.rolling("3D").sum() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3), "3D"]) + def test_constructor_timedelta_window_and_minperiods(self, window, raw): + # GH 15305 + n = 10 + df = DataFrame( + {"value": np.arange(n)}, + index=pd.date_range("2017-08-08", periods=n, freq="D"), + ) + expected = DataFrame( + {"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))}, + index=pd.date_range("2017-08-08", periods=n, freq="D"), + ) + result_roll_sum = df.rolling(window=window, min_periods=2).sum() + result_roll_generic = df.rolling(window=window, min_periods=2).apply( + sum, raw=raw + ) + tm.assert_frame_equal(result_roll_sum, expected) + tm.assert_frame_equal(result_roll_generic, expected) + + @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) + def test_numpy_compat(self, method): + # see gh-12811 + r = rwindow.Rolling(Series([2, 4, 6]), window=2) + + msg = "numpy operations are not valid with window objects" + + with pytest.raises(UnsupportedFunctionCall, 
match=msg): + getattr(r, method)(1, 2, 3) + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(r, method)(dtype=np.float64) + + def test_closed(self): + df = DataFrame({"A": [0, 1, 2, 3, 4]}) + # closed only allowed for datetimelike + with pytest.raises(ValueError): + df.rolling(window=3, closed="neither") + + @pytest.mark.parametrize("closed", ["neither", "left"]) + def test_closed_empty(self, closed, arithmetic_win_operators): + # GH 26005 + func_name = arithmetic_win_operators + ser = pd.Series( + data=np.arange(5), index=pd.date_range("2000", periods=5, freq="2D") + ) + roll = ser.rolling("1D", closed=closed) + + result = getattr(roll, func_name)() + expected = pd.Series([np.nan] * 5, index=ser.index) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("func", ["min", "max"]) + def test_closed_one_entry(self, func): + # GH24718 + ser = pd.Series(data=[2], index=pd.date_range("2000", periods=1)) + result = getattr(ser.rolling("10D", closed="left"), func)() + tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index)) + + @pytest.mark.parametrize("func", ["min", "max"]) + def test_closed_one_entry_groupby(self, func): + # GH24718 + ser = pd.DataFrame( + data={"A": [1, 1, 2], "B": [3, 2, 1]}, + index=pd.date_range("2000", periods=3), + ) + result = getattr( + ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func + )() + exp_idx = pd.MultiIndex.from_arrays( + arrays=[[1, 1, 2], ser.index], names=("A", None) + ) + expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("input_dtype", ["int", "float"]) + @pytest.mark.parametrize( + "func,closed,expected", + [ + ("min", "right", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ("min", "both", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ("min", "neither", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ("min", "left", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ("max", "right", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("max", "both", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("max", "neither", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), + ("max", "left", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), + ], + ) + def test_closed_min_max_datetime(self, input_dtype, func, closed, expected): + # see gh-21704 + ser = pd.Series( + data=np.arange(10).astype(input_dtype), + index=pd.date_range("2000", periods=10), + ) + + result = getattr(ser.rolling("3D", closed=closed), func)() + expected = pd.Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + + def test_closed_uneven(self): + # see gh-21704 + ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10)) + + # uneven + ser = ser.drop(index=ser.index[[1, 5]]) + result = ser.rolling("3D", closed="left").min() + expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "func,closed,expected", + [ + ("min", "right", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ("min", "both", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ("min", "neither", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ("min", "left", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ("max", "right", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]), + ("max", "both", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]), + ("max", "neither", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]), + ("max", "left", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]), + ], + ) + def test_closed_min_max_minp(self, func, closed, 
expected): + # see gh-21704 + ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10)) + ser[ser.index[-3:]] = np.nan + result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)() + expected = pd.Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "closed,expected", + [ + ("right", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]), + ("both", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]), + ("neither", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]), + ("left", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]), + ], + ) + def test_closed_median_quantile(self, closed, expected): + # GH 26005 + ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10)) + roll = ser.rolling("3D", closed=closed) + expected = pd.Series(expected, index=ser.index) + + result = roll.median() + tm.assert_series_equal(result, expected) + + result = roll.quantile(0.5) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("roller", ["1s", 1]) + def tests_empty_df_rolling(self, roller): + # GH 15819 Verifies that datetime and integer rolling windows can be + # applied to empty DataFrames + expected = DataFrame() + result = DataFrame().rolling(roller).sum() + tm.assert_frame_equal(result, expected) + + # Verifies that datetime and integer rolling windows can be applied to + # empty DataFrames with datetime index + expected = DataFrame(index=pd.DatetimeIndex([])) + result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() + tm.assert_frame_equal(result, expected) + + def test_empty_window_median_quantile(self): + # GH 26005 + expected = pd.Series([np.nan, np.nan, np.nan]) + roll = pd.Series(np.arange(3)).rolling(0) + + result = roll.median() + tm.assert_series_equal(result, expected) + + result = roll.quantile(0.1) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero_variable(self): + # https://github.com/pandas-dev/pandas/pull/18921 + x = pd.Series( + [np.nan] * 4, + index=pd.DatetimeIndex( + ["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"] + ), + ) + result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() + expected = pd.Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + + def test_multi_index_names(self): + + # GH 16789, 16825 + cols = pd.MultiIndex.from_product( + [["A", "B"], ["C", "D", "E"]], names=["1", "2"] + ) + df = DataFrame(np.ones((10, 6)), columns=cols) + result = df.rolling(3).cov() + + tm.assert_index_equal(result.columns, df.columns) + assert result.index.names == [None, "1", "2"] + + @pytest.mark.parametrize("klass", [pd.Series, pd.DataFrame]) + def test_iter_raises(self, klass): + # https://github.com/pandas-dev/pandas/issues/11704 + # Iteration over a Window + obj = klass([1, 2, 3, 4]) + with pytest.raises(NotImplementedError): + iter(obj.rolling(2)) + + def test_rolling_axis_sum(self, axis_frame): + # see gh-23372. 
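+        # axis=0/"index": the length-3 window moves down the rows, so each
+        # column of ones sums to 3.0 once the window fills; axis=1/"columns":
+        # the window moves across the columns, giving 3.0 along each row.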
+ df = DataFrame(np.ones((10, 20))) + axis = df._get_axis_number(axis_frame) + + if axis == 0: + expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)}) + else: + # axis == 1 + expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10) + + result = df.rolling(3, axis=axis_frame).sum() + tm.assert_frame_equal(result, expected) + + def test_rolling_axis_count(self, axis_frame): + # see gh-26055 + df = DataFrame({"x": range(3), "y": range(3)}) + + axis = df._get_axis_number(axis_frame) + + if axis in [0, "index"]: + expected = DataFrame({"x": [1.0, 2.0, 2.0], "y": [1.0, 2.0, 2.0]}) + else: + expected = DataFrame({"x": [1.0, 1.0, 1.0], "y": [2.0, 2.0, 2.0]}) + + result = df.rolling(2, axis=axis_frame).count() + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index 3945a8aaa8b87..a6a56c98a9377 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -1,393 +1,13 @@ -from collections import OrderedDict -import copy -from datetime import datetime, timedelta -import warnings -from warnings import catch_warnings - import numpy as np -from numpy.random import randn import pytest from pandas.errors import UnsupportedFunctionCall import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Index, Series, Timestamp, bdate_range, concat, isna, notna -from pandas.core.base import SpecificationError +from pandas import Series import pandas.core.window as rwindow -import pandas.util.testing as tm - -import pandas.tseries.offsets as offsets - -N, K = 100, 10 - - -class Base: - - _nan_locs = np.arange(20, 40) - _inf_locs = np.array([]) - - def _create_data(self): - arr = randn(N) - arr[self._nan_locs] = np.NaN - - self.arr = arr - self.rng = bdate_range(datetime(2009, 1, 1), periods=N) - self.series = Series(arr.copy(), index=self.rng) - self.frame = DataFrame(randn(N, K), index=self.rng, columns=np.arange(K)) - - -class TestApi(Base): - def setup_method(self, method): - self._create_data() - - def test_getitem(self): - - r = self.frame.rolling(window=5) - tm.assert_index_equal(r._selected_obj.columns, self.frame.columns) - - r = self.frame.rolling(window=5)[1] - assert r._selected_obj.name == self.frame.columns[1] - - # technically this is allowed - r = self.frame.rolling(window=5)[1, 3] - tm.assert_index_equal(r._selected_obj.columns, self.frame.columns[[1, 3]]) - - r = self.frame.rolling(window=5)[[1, 3]] - tm.assert_index_equal(r._selected_obj.columns, self.frame.columns[[1, 3]]) - - def test_select_bad_cols(self): - df = DataFrame([[1, 2]], columns=["A", "B"]) - g = df.rolling(window=5) - with pytest.raises(KeyError, match="Columns not found: 'C'"): - g[["C"]] - with pytest.raises(KeyError, match="^[^A]+$"): - # A should not be referenced as a bad column... - # will have to rethink regex if you change message! 
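An aside on the `match="^[^A]+$"` pattern in `test_select_bad_cols` above: `pytest.raises(..., match=...)` applies `re.search` to the stringified exception, so this pattern only matches an error message that contains no `"A"` at all, which is how the test asserts that the valid column is never reported as missing. A standalone sketch of the trick (the second message is a hypothetical counter-example, not taken from pandas):

```python
# The negative-match regex from test_select_bad_cols; pytest.raises(match=...)
# uses re.search on str(excinfo.value) in the same way.
import re

pattern = r"^[^A]+$"
assert re.search(pattern, "Columns not found: 'C'")           # no "A": matches
assert not re.search(pattern, "Columns not found: 'A', 'C'")  # has "A": no match
```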
- g[["A", "C"]] - - def test_attribute_access(self): - - df = DataFrame([[1, 2]], columns=["A", "B"]) - r = df.rolling(window=5) - tm.assert_series_equal(r.A.sum(), r["A"].sum()) - msg = "'Rolling' object has no attribute 'F'" - with pytest.raises(AttributeError, match=msg): - r.F - - def tests_skip_nuisance(self): - - df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"}) - r = df.rolling(window=3) - result = r[["A", "B"]].sum() - expected = DataFrame( - {"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]}, - columns=list("AB"), - ) - tm.assert_frame_equal(result, expected) - - def test_skip_sum_object_raises(self): - df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"}) - r = df.rolling(window=3) - result = r.sum() - expected = DataFrame( - {"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]}, - columns=list("AB"), - ) - tm.assert_frame_equal(result, expected) - - def test_agg(self): - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - - r = df.rolling(window=3) - a_mean = r["A"].mean() - a_std = r["A"].std() - a_sum = r["A"].sum() - b_mean = r["B"].mean() - b_std = r["B"].std() - b_sum = r["B"].sum() - - result = r.aggregate([np.mean, np.std]) - expected = concat([a_mean, a_std, b_mean, b_std], axis=1) - expected.columns = pd.MultiIndex.from_product([["A", "B"], ["mean", "std"]]) - tm.assert_frame_equal(result, expected) - - result = r.aggregate({"A": np.mean, "B": np.std}) - - expected = concat([a_mean, b_std], axis=1) - tm.assert_frame_equal(result, expected, check_like=True) - - result = r.aggregate({"A": ["mean", "std"]}) - expected = concat([a_mean, a_std], axis=1) - expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "std")]) - tm.assert_frame_equal(result, expected) - - result = r["A"].aggregate(["mean", "sum"]) - expected = concat([a_mean, a_sum], axis=1) - expected.columns = ["mean", "sum"] - tm.assert_frame_equal(result, expected) - - with catch_warnings(record=True): - # using a dict with renaming - warnings.simplefilter("ignore", FutureWarning) - result = r.aggregate({"A": {"mean": "mean", "sum": "sum"}}) - expected = concat([a_mean, a_sum], axis=1) - expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "sum")]) - tm.assert_frame_equal(result, expected, check_like=True) - - with catch_warnings(record=True): - warnings.simplefilter("ignore", FutureWarning) - result = r.aggregate( - { - "A": {"mean": "mean", "sum": "sum"}, - "B": {"mean2": "mean", "sum2": "sum"}, - } - ) - expected = concat([a_mean, a_sum, b_mean, b_sum], axis=1) - exp_cols = [("A", "mean"), ("A", "sum"), ("B", "mean2"), ("B", "sum2")] - expected.columns = pd.MultiIndex.from_tuples(exp_cols) - tm.assert_frame_equal(result, expected, check_like=True) - - result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]}) - expected = concat([a_mean, a_std, b_mean, b_std], axis=1) - - exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")] - expected.columns = pd.MultiIndex.from_tuples(exp_cols) - tm.assert_frame_equal(result, expected, check_like=True) - - def test_agg_apply(self, raw): - - # passed lambda - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - - r = df.rolling(window=3) - a_sum = r["A"].sum() - - result = r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)}) - rcustom = r["B"].apply(lambda x: np.std(x, ddof=1), raw=raw) - expected = concat([a_sum, rcustom], axis=1) - tm.assert_frame_equal(result, expected, check_like=True) - - def test_agg_consistency(self): - - df = DataFrame({"A": range(5), "B": 
range(0, 10, 2)}) - r = df.rolling(window=3) - - result = r.agg([np.sum, np.mean]).columns - expected = pd.MultiIndex.from_product([list("AB"), ["sum", "mean"]]) - tm.assert_index_equal(result, expected) - - result = r["A"].agg([np.sum, np.mean]).columns - expected = Index(["sum", "mean"]) - tm.assert_index_equal(result, expected) - - result = r.agg({"A": [np.sum, np.mean]}).columns - expected = pd.MultiIndex.from_tuples([("A", "sum"), ("A", "mean")]) - tm.assert_index_equal(result, expected) - - def test_agg_nested_dicts(self): - - # API change for disallowing these types of nested dicts - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - r = df.rolling(window=3) - - msg = r"cannot perform renaming for (r1|r2) with a nested dictionary" - with pytest.raises(SpecificationError, match=msg): - r.aggregate({"r1": {"A": ["mean", "sum"]}, "r2": {"B": ["mean", "sum"]}}) - - expected = concat( - [r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1 - ) - expected.columns = pd.MultiIndex.from_tuples( - [("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")] - ) - with catch_warnings(record=True): - warnings.simplefilter("ignore", FutureWarning) - result = r[["A", "B"]].agg( - {"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}} - ) - tm.assert_frame_equal(result, expected, check_like=True) - - with catch_warnings(record=True): - warnings.simplefilter("ignore", FutureWarning) - result = r.agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}}) - expected.columns = pd.MultiIndex.from_tuples( - [ - ("A", "ra", "mean"), - ("A", "ra", "std"), - ("B", "rb", "mean"), - ("B", "rb", "std"), - ] - ) - tm.assert_frame_equal(result, expected, check_like=True) - - def test_count_nonnumeric_types(self): - # GH12541 - cols = [ - "int", - "float", - "string", - "datetime", - "timedelta", - "periods", - "fl_inf", - "fl_nan", - "str_nan", - "dt_nat", - "periods_nat", - ] - - df = DataFrame( - { - "int": [1, 2, 3], - "float": [4.0, 5.0, 6.0], - "string": list("abc"), - "datetime": pd.date_range("20170101", periods=3), - "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"), - "periods": [ - pd.Period("2012-01"), - pd.Period("2012-02"), - pd.Period("2012-03"), - ], - "fl_inf": [1.0, 2.0, np.Inf], - "fl_nan": [1.0, 2.0, np.NaN], - "str_nan": ["aa", "bb", np.NaN], - "dt_nat": [ - Timestamp("20170101"), - Timestamp("20170203"), - Timestamp(None), - ], - "periods_nat": [ - pd.Period("2012-01"), - pd.Period("2012-02"), - pd.Period(None), - ], - }, - columns=cols, - ) - - expected = DataFrame( - { - "int": [1.0, 2.0, 2.0], - "float": [1.0, 2.0, 2.0], - "string": [1.0, 2.0, 2.0], - "datetime": [1.0, 2.0, 2.0], - "timedelta": [1.0, 2.0, 2.0], - "periods": [1.0, 2.0, 2.0], - "fl_inf": [1.0, 2.0, 2.0], - "fl_nan": [1.0, 2.0, 1.0], - "str_nan": [1.0, 2.0, 1.0], - "dt_nat": [1.0, 2.0, 1.0], - "periods_nat": [1.0, 2.0, 1.0], - }, - columns=cols, - ) - - result = df.rolling(window=2).count() - tm.assert_frame_equal(result, expected) - - result = df.rolling(1).count() - expected = df.notna().astype(float) - tm.assert_frame_equal(result, expected) - - @td.skip_if_no_scipy - @pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") - def test_window_with_args(self): - # make sure that we are aggregating window functions correctly with arg - r = Series(np.random.randn(100)).rolling( - window=10, min_periods=1, win_type="gaussian" - ) - expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) - expected.columns = ["<lambda>", "<lambda>"] - result = r.aggregate([lambda x: 
x.mean(std=10), lambda x: x.mean(std=0.01)]) - tm.assert_frame_equal(result, expected) - - def a(x): - return x.mean(std=10) - - def b(x): - return x.mean(std=0.01) - - expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) - expected.columns = ["a", "b"] - result = r.aggregate([a, b]) - tm.assert_frame_equal(result, expected) - - def test_preserve_metadata(self): - # GH 10565 - s = Series(np.arange(100), name="foo") - - s2 = s.rolling(30).sum() - s3 = s.rolling(20).sum() - assert s2.name == "foo" - assert s3.name == "foo" - - @pytest.mark.parametrize( - "func,window_size,expected_vals", - [ - ( - "rolling", - 2, - [ - [np.nan, np.nan, np.nan, np.nan], - [15.0, 20.0, 25.0, 20.0], - [25.0, 30.0, 35.0, 30.0], - [np.nan, np.nan, np.nan, np.nan], - [20.0, 30.0, 35.0, 30.0], - [35.0, 40.0, 60.0, 40.0], - [60.0, 80.0, 85.0, 80], - ], - ), - ( - "expanding", - None, - [ - [10.0, 10.0, 20.0, 20.0], - [15.0, 20.0, 25.0, 20.0], - [20.0, 30.0, 30.0, 20.0], - [10.0, 10.0, 30.0, 30.0], - [20.0, 30.0, 35.0, 30.0], - [26.666667, 40.0, 50.0, 30.0], - [40.0, 80.0, 60.0, 30.0], - ], - ), - ], - ) - def test_multiple_agg_funcs(self, func, window_size, expected_vals): - # GH 15072 - df = pd.DataFrame( - [ - ["A", 10, 20], - ["A", 20, 30], - ["A", 30, 40], - ["B", 10, 30], - ["B", 30, 40], - ["B", 40, 80], - ["B", 80, 90], - ], - columns=["stock", "low", "high"], - ) - - f = getattr(df.groupby("stock"), func) - if window_size: - window = f(window_size) - else: - window = f() - - index = pd.MultiIndex.from_tuples( - [("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)], - names=["stock", None], - ) - columns = pd.MultiIndex.from_tuples( - [("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")] - ) - expected = pd.DataFrame(expected_vals, index=index, columns=columns) - - result = window.agg( - OrderedDict((("low", ["mean", "max"]), ("high", ["mean", "min"]))) - ) - - tm.assert_frame_equal(result, expected) +from pandas.tests.window.common import Base @pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") @@ -454,3200 +74,3 @@ def test_agg_function_support(self, arg): with pytest.raises(AttributeError, match=msg): roll.agg({"A": arg}) - - -class TestRolling(Base): - def setup_method(self, method): - self._create_data() - - def test_doc_string(self): - - df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) - df - df.rolling(2).sum() - df.rolling(2, min_periods=1).sum() - - @pytest.mark.parametrize("which", ["series", "frame"]) - def test_constructor(self, which): - # GH 12669 - - o = getattr(self, which) - c = o.rolling - - # valid - c(window=2) - c(window=2, min_periods=1) - c(window=2, min_periods=1, center=True) - c(window=2, min_periods=1, center=False) - - # GH 13383 - with pytest.raises(ValueError): - c(0) - c(-1) - - # not valid - for w in [2.0, "foo", np.array([2])]: - with pytest.raises(ValueError): - c(window=w) - with pytest.raises(ValueError): - c(window=2, min_periods=w) - with pytest.raises(ValueError): - c(window=2, min_periods=1, center=w) - - @td.skip_if_no_scipy - @pytest.mark.parametrize("which", ["series", "frame"]) - def test_constructor_with_win_type(self, which): - # GH 13383 - o = getattr(self, which) - c = o.rolling - with pytest.raises(ValueError): - c(-1, win_type="boxcar") - - @pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3)]) - def test_constructor_with_timedelta_window(self, window): - # GH 15440 - n = 10 - df = DataFrame( - {"value": np.arange(n)}, - index=pd.date_range("2015-12-24", periods=n, freq="D"), - ) 
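# ---- [Editorial aside: illustration only, not part of the recorded diff.]
# The parametrization above exercises the point that offset-like window
# specs -- "3D", datetime.timedelta(days=3) and pd.Timedelta(days=3) -- are
# interchangeable, and (unlike integer windows) they require a monotonic
# datetime-like index and default to min_periods=1. A minimal sketch with
# illustrative data:
import pandas as pd

s = pd.Series(range(4), index=pd.date_range("2019-01-01", periods=4, freq="D"))
assert s.rolling("3D").sum().equals(s.rolling(pd.Timedelta(days=3)).sum())
# ----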
- expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3)) - - result = df.rolling(window=window).sum() - expected = DataFrame( - {"value": expected_data}, - index=pd.date_range("2015-12-24", periods=n, freq="D"), - ) - tm.assert_frame_equal(result, expected) - expected = df.rolling("3D").sum() - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3), "3D"]) - def test_constructor_timedelta_window_and_minperiods(self, window, raw): - # GH 15305 - n = 10 - df = DataFrame( - {"value": np.arange(n)}, - index=pd.date_range("2017-08-08", periods=n, freq="D"), - ) - expected = DataFrame( - {"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))}, - index=pd.date_range("2017-08-08", periods=n, freq="D"), - ) - result_roll_sum = df.rolling(window=window, min_periods=2).sum() - result_roll_generic = df.rolling(window=window, min_periods=2).apply( - sum, raw=raw - ) - tm.assert_frame_equal(result_roll_sum, expected) - tm.assert_frame_equal(result_roll_generic, expected) - - @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) - def test_numpy_compat(self, method): - # see gh-12811 - r = rwindow.Rolling(Series([2, 4, 6]), window=2) - - msg = "numpy operations are not valid with window objects" - - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(r, method)(1, 2, 3) - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(r, method)(dtype=np.float64) - - def test_closed(self): - df = DataFrame({"A": [0, 1, 2, 3, 4]}) - # closed only allowed for datetimelike - with pytest.raises(ValueError): - df.rolling(window=3, closed="neither") - - @pytest.mark.parametrize("closed", ["neither", "left"]) - def test_closed_empty(self, closed, arithmetic_win_operators): - # GH 26005 - func_name = arithmetic_win_operators - ser = pd.Series( - data=np.arange(5), index=pd.date_range("2000", periods=5, freq="2D") - ) - roll = ser.rolling("1D", closed=closed) - - result = getattr(roll, func_name)() - expected = pd.Series([np.nan] * 5, index=ser.index) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("func", ["min", "max"]) - def test_closed_one_entry(self, func): - # GH24718 - ser = pd.Series(data=[2], index=pd.date_range("2000", periods=1)) - result = getattr(ser.rolling("10D", closed="left"), func)() - tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index)) - - @pytest.mark.parametrize("func", ["min", "max"]) - def test_closed_one_entry_groupby(self, func): - # GH24718 - ser = pd.DataFrame( - data={"A": [1, 1, 2], "B": [3, 2, 1]}, - index=pd.date_range("2000", periods=3), - ) - result = getattr( - ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func - )() - exp_idx = pd.MultiIndex.from_arrays( - arrays=[[1, 1, 2], ser.index], names=("A", None) - ) - expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B") - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("input_dtype", ["int", "float"]) - @pytest.mark.parametrize( - "func,closed,expected", - [ - ("min", "right", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]), - ("min", "both", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]), - ("min", "neither", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]), - ("min", "left", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]), - ("max", "right", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - ("max", "both", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - ("max", "neither", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), - ("max", "left", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), - ], - ) - def 
test_closed_min_max_datetime(self, input_dtype, func, closed, expected): - # see gh-21704 - ser = pd.Series( - data=np.arange(10).astype(input_dtype), - index=pd.date_range("2000", periods=10), - ) - - result = getattr(ser.rolling("3D", closed=closed), func)() - expected = pd.Series(expected, index=ser.index) - tm.assert_series_equal(result, expected) - - def test_closed_uneven(self): - # see gh-21704 - ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10)) - - # uneven - ser = ser.drop(index=ser.index[[1, 5]]) - result = ser.rolling("3D", closed="left").min() - expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "func,closed,expected", - [ - ("min", "right", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), - ("min", "both", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]), - ("min", "neither", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), - ("min", "left", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]), - ("max", "right", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]), - ("max", "both", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]), - ("max", "neither", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]), - ("max", "left", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]), - ], - ) - def test_closed_min_max_minp(self, func, closed, expected): - # see gh-21704 - ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10)) - ser[ser.index[-3:]] = np.nan - result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)() - expected = pd.Series(expected, index=ser.index) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "closed,expected", - [ - ("right", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]), - ("both", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]), - ("neither", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]), - ("left", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]), - ], - ) - def test_closed_median_quantile(self, closed, expected): - # GH 26005 - ser = pd.Series(data=np.arange(10), index=pd.date_range("2000", periods=10)) - roll = ser.rolling("3D", closed=closed) - expected = pd.Series(expected, index=ser.index) - - result = roll.median() - tm.assert_series_equal(result, expected) - - result = roll.quantile(0.5) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("roller", ["1s", 1]) - def tests_empty_df_rolling(self, roller): - # GH 15819 Verifies that datetime and integer rolling windows can be - # applied to empty DataFrames - expected = DataFrame() - result = DataFrame().rolling(roller).sum() - tm.assert_frame_equal(result, expected) - - # Verifies that datetime and integer rolling windows can be applied to - # empty DataFrames with datetime index - expected = DataFrame(index=pd.DatetimeIndex([])) - result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() - tm.assert_frame_equal(result, expected) - - def test_empty_window_median_quantile(self): - # GH 26005 - expected = pd.Series([np.nan, np.nan, np.nan]) - roll = pd.Series(np.arange(3)).rolling(0) - - result = roll.median() - tm.assert_series_equal(result, expected) - - result = roll.quantile(0.1) - tm.assert_series_equal(result, expected) - - def test_missing_minp_zero(self): - # https://github.com/pandas-dev/pandas/pull/18921 - # minp=0 - x = pd.Series([np.nan]) - result = x.rolling(1, min_periods=0).sum() - expected = pd.Series([0.0]) - tm.assert_series_equal(result, expected) - - # minp=1 - result = x.rolling(1, min_periods=1).sum() - expected = 
pd.Series([np.nan]) - tm.assert_series_equal(result, expected) - - def test_missing_minp_zero_variable(self): - # https://github.com/pandas-dev/pandas/pull/18921 - x = pd.Series( - [np.nan] * 4, - index=pd.DatetimeIndex( - ["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"] - ), - ) - result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() - expected = pd.Series(0.0, index=x.index) - tm.assert_series_equal(result, expected) - - def test_multi_index_names(self): - - # GH 16789, 16825 - cols = pd.MultiIndex.from_product( - [["A", "B"], ["C", "D", "E"]], names=["1", "2"] - ) - df = DataFrame(np.ones((10, 6)), columns=cols) - result = df.rolling(3).cov() - - tm.assert_index_equal(result.columns, df.columns) - assert result.index.names == [None, "1", "2"] - - @pytest.mark.parametrize("klass", [pd.Series, pd.DataFrame]) - def test_iter_raises(self, klass): - # https://github.com/pandas-dev/pandas/issues/11704 - # Iteration over a Window - obj = klass([1, 2, 3, 4]) - with pytest.raises(NotImplementedError): - iter(obj.rolling(2)) - - def test_rolling_axis_sum(self, axis_frame): - # see gh-23372. - df = DataFrame(np.ones((10, 20))) - axis = df._get_axis_number(axis_frame) - - if axis == 0: - expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)}) - else: - # axis == 1 - expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10) - - result = df.rolling(3, axis=axis_frame).sum() - tm.assert_frame_equal(result, expected) - - def test_rolling_axis_count(self, axis_frame): - # see gh-26055 - df = DataFrame({"x": range(3), "y": range(3)}) - - axis = df._get_axis_number(axis_frame) - - if axis in [0, "index"]: - expected = DataFrame({"x": [1.0, 2.0, 2.0], "y": [1.0, 2.0, 2.0]}) - else: - expected = DataFrame({"x": [1.0, 1.0, 1.0], "y": [2.0, 2.0, 2.0]}) - - result = df.rolling(2, axis=axis_frame).count() - tm.assert_frame_equal(result, expected) - - -class TestExpanding(Base): - def setup_method(self, method): - self._create_data() - - def test_doc_string(self): - - df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) - df - df.expanding(2).sum() - - @pytest.mark.parametrize("which", ["series", "frame"]) - def test_constructor(self, which): - # GH 12669 - - o = getattr(self, which) - c = o.expanding - - # valid - c(min_periods=1) - c(min_periods=1, center=True) - c(min_periods=1, center=False) - - # not valid - for w in [2.0, "foo", np.array([2])]: - with pytest.raises(ValueError): - c(min_periods=w) - with pytest.raises(ValueError): - c(min_periods=1, center=w) - - @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) - def test_numpy_compat(self, method): - # see gh-12811 - e = rwindow.Expanding(Series([2, 4, 6]), window=2) - - msg = "numpy operations are not valid with window objects" - - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(e, method)(1, 2, 3) - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(e, method)(dtype=np.float64) - - @pytest.mark.parametrize( - "expander", - [ - 1, - pytest.param( - "ls", - marks=pytest.mark.xfail( - reason="GH#16425 expanding with offset not supported" - ), - ), - ], - ) - def test_empty_df_expanding(self, expander): - # GH 15819 Verifies that datetime and integer expanding windows can be - # applied to empty DataFrames - - expected = DataFrame() - result = DataFrame().expanding(expander).sum() - tm.assert_frame_equal(result, expected) - - # Verifies that datetime and integer expanding windows can be applied - # to empty DataFrames with datetime index - expected = 
DataFrame(index=pd.DatetimeIndex([])) - result = DataFrame(index=pd.DatetimeIndex([])).expanding(expander).sum() - tm.assert_frame_equal(result, expected) - - def test_missing_minp_zero(self): - # https://github.com/pandas-dev/pandas/pull/18921 - # minp=0 - x = pd.Series([np.nan]) - result = x.expanding(min_periods=0).sum() - expected = pd.Series([0.0]) - tm.assert_series_equal(result, expected) - - # minp=1 - result = x.expanding(min_periods=1).sum() - expected = pd.Series([np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("klass", [pd.Series, pd.DataFrame]) - def test_iter_raises(self, klass): - # https://github.com/pandas-dev/pandas/issues/11704 - # Iteration over a Window - obj = klass([1, 2, 3, 4]) - with pytest.raises(NotImplementedError): - iter(obj.expanding(2)) - - def test_expanding_axis(self, axis_frame): - # see gh-23372. - df = DataFrame(np.ones((10, 20))) - axis = df._get_axis_number(axis_frame) - - if axis == 0: - expected = DataFrame( - {i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)} - ) - else: - # axis == 1 - expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10) - - result = df.expanding(3, axis=axis_frame).sum() - tm.assert_frame_equal(result, expected) - - -class TestEWM(Base): - def setup_method(self, method): - self._create_data() - - def test_doc_string(self): - - df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) - df - df.ewm(com=0.5).mean() - - @pytest.mark.parametrize("which", ["series", "frame"]) - def test_constructor(self, which): - o = getattr(self, which) - c = o.ewm - - # valid - c(com=0.5) - c(span=1.5) - c(alpha=0.5) - c(halflife=0.75) - c(com=0.5, span=None) - c(alpha=0.5, com=None) - c(halflife=0.75, alpha=None) - - # not valid: mutually exclusive - with pytest.raises(ValueError): - c(com=0.5, alpha=0.5) - with pytest.raises(ValueError): - c(span=1.5, halflife=0.75) - with pytest.raises(ValueError): - c(alpha=0.5, span=1.5) - - # not valid: com < 0 - with pytest.raises(ValueError): - c(com=-0.5) - - # not valid: span < 1 - with pytest.raises(ValueError): - c(span=0.5) - - # not valid: halflife <= 0 - with pytest.raises(ValueError): - c(halflife=0) - - # not valid: alpha <= 0 or alpha > 1 - for alpha in (-0.5, 1.5): - with pytest.raises(ValueError): - c(alpha=alpha) - - @pytest.mark.parametrize("method", ["std", "mean", "var"]) - def test_numpy_compat(self, method): - # see gh-12811 - e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) - - msg = "numpy operations are not valid with window objects" - - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(e, method)(1, 2, 3) - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(e, method)(dtype=np.float64) - - -@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") -class TestMoments(Base): - def setup_method(self, method): - self._create_data() - - def test_centered_axis_validation(self): - - # ok - Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean() - - # bad axis - with pytest.raises(ValueError): - Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean() - - # ok ok - DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean() - DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean() - - # bad axis - with pytest.raises(ValueError): - (DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean()) - - def test_rolling_sum(self, raw): - self._check_moment_func( - np.nansum, name="sum", zero_min_periods_equal=False, raw=raw - ) 
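# ---- [Editorial aside: illustration only, not part of the recorded diff.]
# The TestEWM constructor checks removed above all hinge on one identity:
# pandas accepts exactly one of com/span/halflife/alpha and reduces it to a
# single smoothing factor,
#     alpha = 1 / (1 + com) = 2 / (span + 1) = 1 - exp(-ln(2) / halflife),
# raising ValueError when two are passed together. A minimal sketch of that
# equivalence (parameter values are illustrative):
import numpy as np
import pandas as pd
import pandas.util.testing as tm

s = pd.Series([1.0, 2.0, 4.0, 8.0])
com = 2.0
alpha = 1.0 / (1.0 + com)                    # 1/3
span = 2.0 / alpha - 1.0                     # 5.0
halflife = -np.log(2) / np.log(1.0 - alpha)  # ~1.71

tm.assert_series_equal(s.ewm(com=com).mean(), s.ewm(span=span).mean())
tm.assert_series_equal(s.ewm(com=com).mean(), s.ewm(alpha=alpha).mean())
tm.assert_series_equal(s.ewm(com=com).mean(), s.ewm(halflife=halflife).mean())
# ----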
- - def test_rolling_count(self, raw): - counter = lambda x: np.isfinite(x).astype(float).sum() - self._check_moment_func( - counter, name="count", has_min_periods=False, fill_value=0, raw=raw - ) - - def test_rolling_mean(self, raw): - self._check_moment_func(np.mean, name="mean", raw=raw) - - @td.skip_if_no_scipy - def test_cmov_mean(self): - # GH 8238 - vals = np.array( - [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] - ) - result = Series(vals).rolling(5, center=True).mean() - expected = Series( - [ - np.nan, - np.nan, - 9.962, - 11.27, - 11.564, - 12.516, - 12.818, - 12.952, - np.nan, - np.nan, - ] - ) - tm.assert_series_equal(expected, result) - - @td.skip_if_no_scipy - def test_cmov_window(self): - # GH 8238 - vals = np.array( - [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] - ) - result = Series(vals).rolling(5, win_type="boxcar", center=True).mean() - expected = Series( - [ - np.nan, - np.nan, - 9.962, - 11.27, - 11.564, - 12.516, - 12.818, - 12.952, - np.nan, - np.nan, - ] - ) - tm.assert_series_equal(expected, result) - - @td.skip_if_no_scipy - def test_cmov_window_corner(self): - # GH 8238 - # all nan - vals = pd.Series([np.nan] * 10) - result = vals.rolling(5, center=True, win_type="boxcar").mean() - assert np.isnan(result).all() - - # empty - vals = pd.Series([]) - result = vals.rolling(5, center=True, win_type="boxcar").mean() - assert len(result) == 0 - - # shorter than window - vals = pd.Series(np.random.randn(5)) - result = vals.rolling(10, win_type="boxcar").mean() - assert np.isnan(result).all() - assert len(result) == 5 - - @td.skip_if_no_scipy - def test_cmov_window_frame(self): - # Gh 8238 - vals = np.array( - [ - [12.18, 3.64], - [10.18, 9.16], - [13.24, 14.61], - [4.51, 8.11], - [6.15, 11.44], - [9.14, 6.21], - [11.31, 10.67], - [2.94, 6.51], - [9.42, 8.39], - [12.44, 7.34], - ] - ) - - xp = np.array( - [ - [np.nan, np.nan], - [np.nan, np.nan], - [9.252, 9.392], - [8.644, 9.906], - [8.87, 10.208], - [6.81, 8.588], - [7.792, 8.644], - [9.05, 7.824], - [np.nan, np.nan], - [np.nan, np.nan], - ] - ) - - # DataFrame - rs = DataFrame(vals).rolling(5, win_type="boxcar", center=True).mean() - tm.assert_frame_equal(DataFrame(xp), rs) - - # invalid method - with pytest.raises(AttributeError): - (DataFrame(vals).rolling(5, win_type="boxcar", center=True).std()) - - # sum - xp = np.array( - [ - [np.nan, np.nan], - [np.nan, np.nan], - [46.26, 46.96], - [43.22, 49.53], - [44.35, 51.04], - [34.05, 42.94], - [38.96, 43.22], - [45.25, 39.12], - [np.nan, np.nan], - [np.nan, np.nan], - ] - ) - - rs = DataFrame(vals).rolling(5, win_type="boxcar", center=True).sum() - tm.assert_frame_equal(DataFrame(xp), rs) - - @td.skip_if_no_scipy - def test_cmov_window_na_min_periods(self): - # min_periods - vals = Series(np.random.randn(10)) - vals[4] = np.nan - vals[8] = np.nan - - xp = vals.rolling(5, min_periods=4, center=True).mean() - rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean() - tm.assert_series_equal(xp, rs) - - @td.skip_if_no_scipy - def test_cmov_window_regular(self, win_types): - # GH 8238 - vals = np.array( - [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] - ) - xps = { - "hamming": [ - np.nan, - np.nan, - 8.71384, - 9.56348, - 12.38009, - 14.03687, - 13.8567, - 11.81473, - np.nan, - np.nan, - ], - "triang": [ - np.nan, - np.nan, - 9.28667, - 10.34667, - 12.00556, - 13.33889, - 13.38, - 12.33667, - np.nan, - np.nan, - ], - "barthann": [ - np.nan, - np.nan, - 8.4425, - 9.1925, - 12.5575, - 14.3675, - 
14.0825, - 11.5675, - np.nan, - np.nan, - ], - "bohman": [ - np.nan, - np.nan, - 7.61599, - 9.1764, - 12.83559, - 14.17267, - 14.65923, - 11.10401, - np.nan, - np.nan, - ], - "blackmanharris": [ - np.nan, - np.nan, - 6.97691, - 9.16438, - 13.05052, - 14.02156, - 15.10512, - 10.74574, - np.nan, - np.nan, - ], - "nuttall": [ - np.nan, - np.nan, - 7.04618, - 9.16786, - 13.02671, - 14.03559, - 15.05657, - 10.78514, - np.nan, - np.nan, - ], - "blackman": [ - np.nan, - np.nan, - 7.73345, - 9.17869, - 12.79607, - 14.20036, - 14.57726, - 11.16988, - np.nan, - np.nan, - ], - "bartlett": [ - np.nan, - np.nan, - 8.4425, - 9.1925, - 12.5575, - 14.3675, - 14.0825, - 11.5675, - np.nan, - np.nan, - ], - } - - xp = Series(xps[win_types]) - rs = Series(vals).rolling(5, win_type=win_types, center=True).mean() - tm.assert_series_equal(xp, rs) - - @td.skip_if_no_scipy - def test_cmov_window_regular_linear_range(self, win_types): - # GH 8238 - vals = np.array(range(10), dtype=np.float) - xp = vals.copy() - xp[:2] = np.nan - xp[-2:] = np.nan - xp = Series(xp) - - rs = Series(vals).rolling(5, win_type=win_types, center=True).mean() - tm.assert_series_equal(xp, rs) - - @td.skip_if_no_scipy - def test_cmov_window_regular_missing_data(self, win_types): - # GH 8238 - vals = np.array( - [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48] - ) - xps = { - "bartlett": [ - np.nan, - np.nan, - 9.70333, - 10.5225, - 8.4425, - 9.1925, - 12.5575, - 14.3675, - 15.61667, - 13.655, - ], - "blackman": [ - np.nan, - np.nan, - 9.04582, - 11.41536, - 7.73345, - 9.17869, - 12.79607, - 14.20036, - 15.8706, - 13.655, - ], - "barthann": [ - np.nan, - np.nan, - 9.70333, - 10.5225, - 8.4425, - 9.1925, - 12.5575, - 14.3675, - 15.61667, - 13.655, - ], - "bohman": [ - np.nan, - np.nan, - 8.9444, - 11.56327, - 7.61599, - 9.1764, - 12.83559, - 14.17267, - 15.90976, - 13.655, - ], - "hamming": [ - np.nan, - np.nan, - 9.59321, - 10.29694, - 8.71384, - 9.56348, - 12.38009, - 14.20565, - 15.24694, - 13.69758, - ], - "nuttall": [ - np.nan, - np.nan, - 8.47693, - 12.2821, - 7.04618, - 9.16786, - 13.02671, - 14.03673, - 16.08759, - 13.65553, - ], - "triang": [ - np.nan, - np.nan, - 9.33167, - 9.76125, - 9.28667, - 10.34667, - 12.00556, - 13.82125, - 14.49429, - 13.765, - ], - "blackmanharris": [ - np.nan, - np.nan, - 8.42526, - 12.36824, - 6.97691, - 9.16438, - 13.05052, - 14.02175, - 16.1098, - 13.65509, - ], - } - - xp = Series(xps[win_types]) - rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean() - tm.assert_series_equal(xp, rs) - - @td.skip_if_no_scipy - def test_cmov_window_special(self, win_types_special): - # GH 8238 - kwds = { - "kaiser": {"beta": 1.0}, - "gaussian": {"std": 1.0}, - "general_gaussian": {"power": 2.0, "width": 2.0}, - "exponential": {"tau": 10}, - } - - vals = np.array( - [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48] - ) - - xps = { - "gaussian": [ - np.nan, - np.nan, - 8.97297, - 9.76077, - 12.24763, - 13.89053, - 13.65671, - 12.01002, - np.nan, - np.nan, - ], - "general_gaussian": [ - np.nan, - np.nan, - 9.85011, - 10.71589, - 11.73161, - 13.08516, - 12.95111, - 12.74577, - np.nan, - np.nan, - ], - "kaiser": [ - np.nan, - np.nan, - 9.86851, - 11.02969, - 11.65161, - 12.75129, - 12.90702, - 12.83757, - np.nan, - np.nan, - ], - "exponential": [ - np.nan, - np.nan, - 9.83364, - 11.10472, - 11.64551, - 12.66138, - 12.92379, - 12.83770, - np.nan, - np.nan, - ], - } - - xp = Series(xps[win_types_special]) - rs = ( - Series(vals) - .rolling(5, win_type=win_types_special, 
center=True) - .mean(**kwds[win_types_special]) - ) - tm.assert_series_equal(xp, rs) - - @td.skip_if_no_scipy - def test_cmov_window_special_linear_range(self, win_types_special): - # GH 8238 - kwds = { - "kaiser": {"beta": 1.0}, - "gaussian": {"std": 1.0}, - "general_gaussian": {"power": 2.0, "width": 2.0}, - "slepian": {"width": 0.5}, - "exponential": {"tau": 10}, - } - - vals = np.array(range(10), dtype=np.float) - xp = vals.copy() - xp[:2] = np.nan - xp[-2:] = np.nan - xp = Series(xp) - - rs = ( - Series(vals) - .rolling(5, win_type=win_types_special, center=True) - .mean(**kwds[win_types_special]) - ) - tm.assert_series_equal(xp, rs) - - def test_rolling_median(self, raw): - self._check_moment_func(np.median, name="median", raw=raw) - - def test_rolling_min(self, raw): - self._check_moment_func(np.min, name="min", raw=raw) - - a = pd.Series([1, 2, 3, 4, 5]) - result = a.rolling(window=100, min_periods=1).min() - expected = pd.Series(np.ones(len(a))) - tm.assert_series_equal(result, expected) - - with pytest.raises(ValueError): - pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min() - - def test_rolling_max(self, raw): - self._check_moment_func(np.max, name="max", raw=raw) - - a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64) - b = a.rolling(window=100, min_periods=1).max() - tm.assert_almost_equal(a, b) - - with pytest.raises(ValueError): - pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max() - - @pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) - def test_rolling_quantile(self, q, raw): - def scoreatpercentile(a, per): - values = np.sort(a, axis=0) - - idx = int(per / 1.0 * (values.shape[0] - 1)) - - if idx == values.shape[0] - 1: - retval = values[-1] - - else: - qlow = float(idx) / float(values.shape[0] - 1) - qhig = float(idx + 1) / float(values.shape[0] - 1) - vlow = values[idx] - vhig = values[idx + 1] - retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow) - - return retval - - def quantile_func(x): - return scoreatpercentile(x, q) - - self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw) - - def test_rolling_quantile_np_percentile(self): - # #9413: Tests that rolling window's quantile default behavior - # is analogous to Numpy's percentile - row = 10 - col = 5 - idx = pd.date_range("20100101", periods=row, freq="B") - df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx) - - df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0) - np_percentile = np.percentile(df, [25, 50, 75], axis=0) - - tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) - - @pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1]) - @pytest.mark.parametrize( - "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] - ) - @pytest.mark.parametrize( - "data", - [ - [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], - [8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0], - [0.0, np.nan, 0.2, np.nan, 0.4], - [np.nan, np.nan, np.nan, np.nan], - [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5], - [0.5], - [np.nan, 0.7, 0.6], - ], - ) - def test_rolling_quantile_interpolation_options( - self, quantile, interpolation, data - ): - # Tests that rolling window's quantile behavior is analogous to - # Series' quantile for each interpolation option - s = Series(data) - - q1 = s.quantile(quantile, interpolation) - q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1] - - if np.isnan(q1): - assert np.isnan(q2) - else: - assert q1 == q2 - - def test_invalid_quantile_value(self): - data = np.arange(5) - s = Series(data) - - msg = 
"Interpolation 'invalid' is not supported" - with pytest.raises(ValueError, match=msg): - s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid") - - def test_rolling_quantile_param(self): - ser = Series([0.0, 0.1, 0.5, 0.9, 1.0]) - - with pytest.raises(ValueError): - ser.rolling(3).quantile(-0.1) - - with pytest.raises(ValueError): - ser.rolling(3).quantile(10.0) - - with pytest.raises(TypeError): - ser.rolling(3).quantile("foo") - - def test_rolling_apply(self, raw): - # suppress warnings about empty slices, as we are deliberately testing - # with a 0-length Series - - def f(x): - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - message=".*(empty slice|0 for slice).*", - category=RuntimeWarning, - ) - return x[np.isfinite(x)].mean() - - self._check_moment_func(np.mean, name="apply", func=f, raw=raw) - - expected = Series([]) - result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw) - tm.assert_series_equal(result, expected) - - # gh-8080 - s = Series([None, None, None]) - result = s.rolling(2, min_periods=0).apply(lambda x: len(x), raw=raw) - expected = Series([1.0, 2.0, 2.0]) - tm.assert_series_equal(result, expected) - - result = s.rolling(2, min_periods=0).apply(len, raw=raw) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("klass", [Series, DataFrame]) - @pytest.mark.parametrize( - "method", [lambda x: x.rolling(window=2), lambda x: x.expanding()] - ) - def test_apply_future_warning(self, klass, method): - - # gh-5071 - s = klass(np.arange(3)) - - with tm.assert_produces_warning(FutureWarning): - method(s).apply(lambda x: len(x)) - - def test_rolling_apply_out_of_bounds(self, raw): - # gh-1850 - vals = pd.Series([1, 2, 3, 4]) - - result = vals.rolling(10).apply(np.sum, raw=raw) - assert result.isna().all() - - result = vals.rolling(10, min_periods=1).apply(np.sum, raw=raw) - expected = pd.Series([1, 3, 6, 10], dtype=float) - tm.assert_almost_equal(result, expected) - - @pytest.mark.parametrize("window", [2, "2s"]) - def test_rolling_apply_with_pandas_objects(self, window): - # 5071 - df = pd.DataFrame( - {"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)}, - index=pd.date_range("20130101", periods=5, freq="s"), - ) - - # we have an equal spaced timeseries index - # so simulate removing the first period - def f(x): - if x.index[0] == df.index[0]: - return np.nan - return x.iloc[-1] - - result = df.rolling(window).apply(f, raw=False) - expected = df.iloc[2:].reindex_like(df) - tm.assert_frame_equal(result, expected) - - with pytest.raises(AttributeError): - df.rolling(window).apply(f, raw=True) - - def test_rolling_std(self, raw): - self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw) - self._check_moment_func( - lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw - ) - - def test_rolling_std_1obs(self): - vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0]) - - result = vals.rolling(1, min_periods=1).std() - expected = pd.Series([np.nan] * 5) - tm.assert_series_equal(result, expected) - - result = vals.rolling(1, min_periods=1).std(ddof=0) - expected = pd.Series([0.0] * 5) - tm.assert_series_equal(result, expected) - - result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std() - assert np.isnan(result[2]) - - def test_rolling_std_neg_sqrt(self): - # unit test from Bottleneck - - # Test move_nanstd for neg sqrt. 
- - a = pd.Series( - [ - 0.0011448196318903589, - 0.00028718669878572767, - 0.00028718669878572767, - 0.00028718669878572767, - 0.00028718669878572767, - ] - ) - b = a.rolling(window=3).std() - assert np.isfinite(b[2:]).all() - - b = a.ewm(span=3).std() - assert np.isfinite(b[2:]).all() - - def test_rolling_var(self, raw): - self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw) - self._check_moment_func( - lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw - ) - - @td.skip_if_no_scipy - def test_rolling_skew(self, raw): - from scipy.stats import skew - - self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw) - - @td.skip_if_no_scipy - def test_rolling_kurt(self, raw): - from scipy.stats import kurtosis - - self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw) - - def _check_moment_func( - self, - static_comp, - name, - raw, - has_min_periods=True, - has_center=True, - has_time_rule=True, - fill_value=None, - zero_min_periods_equal=True, - **kwargs - ): - - # inject raw - if name == "apply": - kwargs = copy.copy(kwargs) - kwargs["raw"] = raw - - def get_result(obj, window, min_periods=None, center=False): - r = obj.rolling(window=window, min_periods=min_periods, center=center) - return getattr(r, name)(**kwargs) - - series_result = get_result(self.series, window=50) - assert isinstance(series_result, Series) - tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:])) - - frame_result = get_result(self.frame, window=50) - assert isinstance(frame_result, DataFrame) - tm.assert_series_equal( - frame_result.iloc[-1, :], - self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw), - check_names=False, - ) - - # check time_rule works - if has_time_rule: - win = 25 - minp = 10 - series = self.series[::2].resample("B").mean() - frame = self.frame[::2].resample("B").mean() - - if has_min_periods: - series_result = get_result(series, window=win, min_periods=minp) - frame_result = get_result(frame, window=win, min_periods=minp) - else: - series_result = get_result(series, window=win) - frame_result = get_result(frame, window=win) - - last_date = series_result.index[-1] - prev_date = last_date - 24 * offsets.BDay() - - trunc_series = self.series[::2].truncate(prev_date, last_date) - trunc_frame = self.frame[::2].truncate(prev_date, last_date) - - tm.assert_almost_equal(series_result[-1], static_comp(trunc_series)) - - tm.assert_series_equal( - frame_result.xs(last_date), - trunc_frame.apply(static_comp, raw=raw), - check_names=False, - ) - - # excluding NaNs correctly - obj = Series(randn(50)) - obj[:10] = np.NaN - obj[-10:] = np.NaN - if has_min_periods: - result = get_result(obj, 50, min_periods=30) - tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10])) - - # min_periods is working correctly - result = get_result(obj, 20, min_periods=15) - assert isna(result.iloc[23]) - assert not isna(result.iloc[24]) - - assert not isna(result.iloc[-6]) - assert isna(result.iloc[-5]) - - obj2 = Series(randn(20)) - result = get_result(obj2, 10, min_periods=5) - assert isna(result.iloc[3]) - assert notna(result.iloc[4]) - - if zero_min_periods_equal: - # min_periods=0 may be equivalent to min_periods=1 - result0 = get_result(obj, 20, min_periods=0) - result1 = get_result(obj, 20, min_periods=1) - tm.assert_almost_equal(result0, result1) - else: - result = get_result(obj, 50) - tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10])) - - # window larger than series length (#7297) - if 
has_min_periods: - for minp in (0, len(self.series) - 1, len(self.series)): - result = get_result(self.series, len(self.series) + 1, min_periods=minp) - expected = get_result(self.series, len(self.series), min_periods=minp) - nan_mask = isna(result) - tm.assert_series_equal(nan_mask, isna(expected)) - - nan_mask = ~nan_mask - tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) - else: - result = get_result(self.series, len(self.series) + 1) - expected = get_result(self.series, len(self.series)) - nan_mask = isna(result) - tm.assert_series_equal(nan_mask, isna(expected)) - - nan_mask = ~nan_mask - tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) - - # check center=True - if has_center: - if has_min_periods: - result = get_result(obj, 20, min_periods=15, center=True) - expected = get_result( - pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15 - )[9:].reset_index(drop=True) - else: - result = get_result(obj, 20, center=True) - expected = get_result(pd.concat([obj, Series([np.NaN] * 9)]), 20)[ - 9: - ].reset_index(drop=True) - - tm.assert_series_equal(result, expected) - - # shifter index - s = ["x{x:d}".format(x=x) for x in range(12)] - - if has_min_periods: - minp = 10 - - series_xp = ( - get_result( - self.series.reindex(list(self.series.index) + s), - window=25, - min_periods=minp, - ) - .shift(-12) - .reindex(self.series.index) - ) - frame_xp = ( - get_result( - self.frame.reindex(list(self.frame.index) + s), - window=25, - min_periods=minp, - ) - .shift(-12) - .reindex(self.frame.index) - ) - - series_rs = get_result( - self.series, window=25, min_periods=minp, center=True - ) - frame_rs = get_result( - self.frame, window=25, min_periods=minp, center=True - ) - - else: - series_xp = ( - get_result( - self.series.reindex(list(self.series.index) + s), window=25 - ) - .shift(-12) - .reindex(self.series.index) - ) - frame_xp = ( - get_result( - self.frame.reindex(list(self.frame.index) + s), window=25 - ) - .shift(-12) - .reindex(self.frame.index) - ) - - series_rs = get_result(self.series, window=25, center=True) - frame_rs = get_result(self.frame, window=25, center=True) - - if fill_value is not None: - series_xp = series_xp.fillna(fill_value) - frame_xp = frame_xp.fillna(fill_value) - tm.assert_series_equal(series_xp, series_rs) - tm.assert_frame_equal(frame_xp, frame_rs) - - def test_ewma(self): - self._check_ew(name="mean") - - vals = pd.Series(np.zeros(1000)) - vals[5] = 1 - result = vals.ewm(span=100, adjust=False).mean().sum() - assert np.abs(result - 1) < 1e-2 - - @pytest.mark.parametrize("adjust", [True, False]) - @pytest.mark.parametrize("ignore_na", [True, False]) - def test_ewma_cases(self, adjust, ignore_na): - # try adjust/ignore_na args matrix - - s = Series([1.0, 2.0, 4.0, 8.0]) - - if adjust: - expected = Series([1.0, 1.6, 2.736842, 4.923077]) - else: - expected = Series([1.0, 1.333333, 2.222222, 4.148148]) - - result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() - tm.assert_series_equal(result, expected) - - def test_ewma_nan_handling(self): - s = Series([1.0] + [np.nan] * 5 + [1.0]) - result = s.ewm(com=5).mean() - tm.assert_series_equal(result, Series([1.0] * len(s))) - - s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) - result = s.ewm(com=5).mean() - tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) - - # GH 7603 - s0 = Series([np.nan, 1.0, 101.0]) - s1 = Series([1.0, np.nan, 101.0]) - s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]) - s3 = Series([1.0, np.nan, 101.0, 50.0]) - com = 2.0 - alpha 
= 1.0 / (1.0 + com) - - def simple_wma(s, w): - return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill") - - for (s, adjust, ignore_na, w) in [ - (s0, True, False, [np.nan, (1.0 - alpha), 1.0]), - (s0, True, True, [np.nan, (1.0 - alpha), 1.0]), - (s0, False, False, [np.nan, (1.0 - alpha), alpha]), - (s0, False, True, [np.nan, (1.0 - alpha), alpha]), - (s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]), - (s1, True, True, [(1.0 - alpha), np.nan, 1.0]), - (s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]), - (s1, False, True, [(1.0 - alpha), np.nan, alpha]), - ( - s2, - True, - False, - [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan], - ), - (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]), - ( - s2, - False, - False, - [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan], - ), - (s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]), - (s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]), - (s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]), - ( - s3, - False, - False, - [ - (1.0 - alpha) ** 3, - np.nan, - (1.0 - alpha) * alpha, - alpha * ((1.0 - alpha) ** 2 + alpha), - ], - ), - ( - s3, - False, - True, - [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha], - ), - ]: - expected = simple_wma(s, Series(w)) - result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean() - - tm.assert_series_equal(result, expected) - if ignore_na is False: - # check that ignore_na defaults to False - result = s.ewm(com=com, adjust=adjust).mean() - tm.assert_series_equal(result, expected) - - def test_ewmvar(self): - self._check_ew(name="var") - - def test_ewmvol(self): - self._check_ew(name="vol") - - def test_ewma_span_com_args(self): - A = self.series.ewm(com=9.5).mean() - B = self.series.ewm(span=20).mean() - tm.assert_almost_equal(A, B) - - with pytest.raises(ValueError): - self.series.ewm(com=9.5, span=20) - with pytest.raises(ValueError): - self.series.ewm().mean() - - def test_ewma_halflife_arg(self): - A = self.series.ewm(com=13.932726172912965).mean() - B = self.series.ewm(halflife=10.0).mean() - tm.assert_almost_equal(A, B) - - with pytest.raises(ValueError): - self.series.ewm(span=20, halflife=50) - with pytest.raises(ValueError): - self.series.ewm(com=9.5, halflife=50) - with pytest.raises(ValueError): - self.series.ewm(com=9.5, span=20, halflife=50) - with pytest.raises(ValueError): - self.series.ewm() - - def test_ewm_alpha(self): - # GH 10789 - s = Series(self.arr) - a = s.ewm(alpha=0.61722699889169674).mean() - b = s.ewm(com=0.62014947789973052).mean() - c = s.ewm(span=2.240298955799461).mean() - d = s.ewm(halflife=0.721792864318).mean() - tm.assert_series_equal(a, b) - tm.assert_series_equal(a, c) - tm.assert_series_equal(a, d) - - def test_ewm_alpha_arg(self): - # GH 10789 - s = self.series - with pytest.raises(ValueError): - s.ewm() - with pytest.raises(ValueError): - s.ewm(com=10.0, alpha=0.5) - with pytest.raises(ValueError): - s.ewm(span=10.0, alpha=0.5) - with pytest.raises(ValueError): - s.ewm(halflife=10.0, alpha=0.5) - - def test_ewm_domain_checks(self): - # GH 12492 - s = Series(self.arr) - msg = "comass must satisfy: comass >= 0" - with pytest.raises(ValueError, match=msg): - s.ewm(com=-0.1) - s.ewm(com=0.0) - s.ewm(com=0.1) - - msg = "span must satisfy: span >= 1" - with pytest.raises(ValueError, match=msg): - s.ewm(span=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(span=0.0) - with pytest.raises(ValueError, match=msg): - s.ewm(span=0.9) - 
s.ewm(span=1.0) - s.ewm(span=1.1) - - msg = "halflife must satisfy: halflife > 0" - with pytest.raises(ValueError, match=msg): - s.ewm(halflife=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(halflife=0.0) - s.ewm(halflife=0.1) - - msg = "alpha must satisfy: 0 < alpha <= 1" - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=0.0) - s.ewm(alpha=0.1) - s.ewm(alpha=1.0) - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=1.1) - - @pytest.mark.parametrize("method", ["mean", "vol", "var"]) - def test_ew_empty_series(self, method): - vals = pd.Series([], dtype=np.float64) - - ewm = vals.ewm(3) - result = getattr(ewm, method)() - tm.assert_almost_equal(result, vals) - - def _check_ew(self, name=None, preserve_nan=False): - series_result = getattr(self.series.ewm(com=10), name)() - assert isinstance(series_result, Series) - - frame_result = getattr(self.frame.ewm(com=10), name)() - assert type(frame_result) == DataFrame - - result = getattr(self.series.ewm(com=10), name)() - if preserve_nan: - assert result[self._nan_locs].isna().all() - - # excluding NaNs correctly - arr = randn(50) - arr[:10] = np.NaN - arr[-10:] = np.NaN - s = Series(arr) - - # check min_periods - # GH 7898 - result = getattr(s.ewm(com=50, min_periods=2), name)() - assert result[:11].isna().all() - assert not result[11:].isna().any() - - for min_periods in (0, 1): - result = getattr(s.ewm(com=50, min_periods=min_periods), name)() - if name == "mean": - assert result[:10].isna().all() - assert not result[10:].isna().any() - else: - # ewm.std, ewm.vol, ewm.var (with bias=False) require at least - # two values - assert result[:11].isna().all() - assert not result[11:].isna().any() - - # check series of length 0 - result = getattr(Series().ewm(com=50, min_periods=min_periods), name)() - tm.assert_series_equal(result, Series()) - - # check series of length 1 - result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() - if name == "mean": - tm.assert_series_equal(result, Series([1.0])) - else: - # ewm.std, ewm.vol, ewm.var with bias=False require at least - # two values - tm.assert_series_equal(result, Series([np.NaN])) - - # pass in ints - result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() - assert result2.dtype == np.float_ - - -# create the data only once as we are not setting it -def _create_consistency_data(): - def create_series(): - return [ - Series(), - Series([np.nan]), - Series([np.nan, np.nan]), - Series([3.0]), - Series([np.nan, 3.0]), - Series([3.0, np.nan]), - Series([1.0, 3.0]), - Series([2.0, 2.0]), - Series([3.0, 1.0]), - Series( - [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan] - ), - Series( - [ - np.nan, - 5.0, - 5.0, - 5.0, - np.nan, - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - ] - ), - Series( - [ - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - ] - ), - Series( - [ - np.nan, - 3.0, - np.nan, - 3.0, - 4.0, - 5.0, - 6.0, - np.nan, - np.nan, - 7.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - np.nan, - 5.0, - np.nan, - 2.0, - 4.0, - 0.0, - 9.0, - np.nan, - np.nan, - 3.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - 2.0, - 3.0, - np.nan, - 3.0, - 4.0, - 5.0, - 6.0, - np.nan, - np.nan, - 7.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - 2.0, - 5.0, - np.nan, - 2.0, - 4.0, - 0.0, - 9.0, - np.nan, - np.nan, - 3.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series(range(10)), - 
Series(range(20, 0, -2)), - ] - - def create_dataframes(): - return [ - DataFrame(), - DataFrame(columns=["a"]), - DataFrame(columns=["a", "a"]), - DataFrame(columns=["a", "b"]), - DataFrame(np.arange(10).reshape((5, 2))), - DataFrame(np.arange(25).reshape((5, 5))), - DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]), - ] + [DataFrame(s) for s in create_series()] - - def is_constant(x): - values = x.values.ravel() - return len(set(values[notna(values)])) == 1 - - def no_nans(x): - return x.notna().all().all() - - # data is a tuple(object, is_constant, no_nans) - data = create_series() + create_dataframes() - - return [(x, is_constant(x), no_nans(x)) for x in data] - - -_consistency_data = _create_consistency_data() - - -def _rolling_consistency_cases(): - for window in [1, 2, 3, 10, 20]: - for min_periods in {0, 1, 2, 3, 4, window}: - if min_periods and (min_periods > window): - continue - for center in [False, True]: - yield window, min_periods, center - - -class TestMomentsConsistency(Base): - base_functions = [ - (lambda v: Series(v).count(), None, "count"), - (lambda v: Series(v).max(), None, "max"), - (lambda v: Series(v).min(), None, "min"), - (lambda v: Series(v).sum(), None, "sum"), - (lambda v: Series(v).mean(), None, "mean"), - (lambda v: Series(v).std(), 1, "std"), - (lambda v: Series(v).cov(Series(v)), None, "cov"), - (lambda v: Series(v).corr(Series(v)), None, "corr"), - (lambda v: Series(v).var(), 1, "var"), - # restore once GH 8086 is fixed - # lambda v: Series(v).skew(), 3, 'skew'), - # (lambda v: Series(v).kurt(), 4, 'kurt'), - # restore once GH 8084 is fixed - # lambda v: Series(v).quantile(0.3), None, 'quantile'), - (lambda v: Series(v).median(), None, "median"), - (np.nanmax, 1, "max"), - (np.nanmin, 1, "min"), - (np.nansum, 1, "sum"), - (np.nanmean, 1, "mean"), - (lambda v: np.nanstd(v, ddof=1), 1, "std"), - (lambda v: np.nanvar(v, ddof=1), 1, "var"), - (np.nanmedian, 1, "median"), - ] - no_nan_functions = [ - (np.max, None, "max"), - (np.min, None, "min"), - (np.sum, None, "sum"), - (np.mean, None, "mean"), - (lambda v: np.std(v, ddof=1), 1, "std"), - (lambda v: np.var(v, ddof=1), 1, "var"), - (np.median, None, "median"), - ] - - def _create_data(self): - super()._create_data() - self.data = _consistency_data - - def setup_method(self, method): - self._create_data() - - def _test_moments_consistency( - self, - min_periods, - count, - mean, - mock_mean, - corr, - var_unbiased=None, - std_unbiased=None, - cov_unbiased=None, - var_biased=None, - std_biased=None, - cov_biased=None, - var_debiasing_factors=None, - ): - def _non_null_values(x): - values = x.values.ravel() - return set(values[notna(values)].tolist()) - - for (x, is_constant, no_nans) in self.data: - count_x = count(x) - mean_x = mean(x) - - if mock_mean: - # check that mean equals mock_mean - expected = mock_mean(x) - tm.assert_equal(mean_x, expected.astype("float64")) - - # check that correlation of a series with itself is either 1 or NaN - corr_x_x = corr(x, x) - - # assert _non_null_values(corr_x_x).issubset(set([1.])) - # restore once rolling_cov(x, x) is identically equal to var(x) - - if is_constant: - exp = x.max() if isinstance(x, Series) else x.max().max() - - # check mean of constant series - expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = exp - tm.assert_equal(mean_x, expected) - - # check correlation of constant series with itself is NaN - expected[:] = np.nan - tm.assert_equal(corr_x_x, expected) - - if var_unbiased and var_biased and 
var_debiasing_factors: - # check variance debiasing factors - var_unbiased_x = var_unbiased(x) - var_biased_x = var_biased(x) - var_debiasing_factors_x = var_debiasing_factors(x) - tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) - - for (std, var, cov) in [ - (std_biased, var_biased, cov_biased), - (std_unbiased, var_unbiased, cov_unbiased), - ]: - - # check that var(x), std(x), and cov(x) are all >= 0 - var_x = var(x) - std_x = std(x) - assert not (var_x < 0).any().any() - assert not (std_x < 0).any().any() - if cov: - cov_x_x = cov(x, x) - assert not (cov_x_x < 0).any().any() - - # check that var(x) == cov(x, x) - tm.assert_equal(var_x, cov_x_x) - - # check that var(x) == std(x)^2 - tm.assert_equal(var_x, std_x * std_x) - - if var is var_biased: - # check that biased var(x) == mean(x^2) - mean(x)^2 - mean_x2 = mean(x * x) - tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) - - if is_constant: - # check that variance of constant series is identically 0 - assert not (var_x > 0).any().any() - expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = 0.0 - if var is var_unbiased: - expected[count_x < 2] = np.nan - tm.assert_equal(var_x, expected) - - if isinstance(x, Series): - for (y, is_constant, no_nans) in self.data: - if not x.isna().equals(y.isna()): - # can only easily test two Series with similar - # structure - continue - - # check that cor(x, y) is symmetric - corr_x_y = corr(x, y) - corr_y_x = corr(y, x) - tm.assert_equal(corr_x_y, corr_y_x) - - if cov: - # check that cov(x, y) is symmetric - cov_x_y = cov(x, y) - cov_y_x = cov(y, x) - tm.assert_equal(cov_x_y, cov_y_x) - - # check that cov(x, y) == (var(x+y) - var(x) - - # var(y)) / 2 - var_x_plus_y = var(x + y) - var_y = var(y) - tm.assert_equal( - cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y) - ) - - # check that corr(x, y) == cov(x, y) / (std(x) * - # std(y)) - std_y = std(y) - tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) - - if cov is cov_biased: - # check that biased cov(x, y) == mean(x*y) - - # mean(x)*mean(y) - mean_y = mean(y) - mean_x_times_y = mean(x * y) - tm.assert_equal( - cov_x_y, mean_x_times_y - (mean_x * mean_y) - ) - - @pytest.mark.slow - @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) - @pytest.mark.parametrize("adjust", [True, False]) - @pytest.mark.parametrize("ignore_na", [True, False]) - def test_ewm_consistency(self, min_periods, adjust, ignore_na): - def _weights(s, com, adjust, ignore_na): - if isinstance(s, DataFrame): - if not len(s.columns): - return DataFrame(index=s.index, columns=s.columns) - w = concat( - [ - _weights( - s.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na - ) - for i, _ in enumerate(s.columns) - ], - axis=1, - ) - w.index = s.index - w.columns = s.columns - return w - - w = Series(np.nan, index=s.index) - alpha = 1.0 / (1.0 + com) - if ignore_na: - w[s.notna()] = _weights( - s[s.notna()], com=com, adjust=adjust, ignore_na=False - ) - elif adjust: - for i in range(len(s)): - if s.iat[i] == s.iat[i]: - w.iat[i] = pow(1.0 / (1.0 - alpha), i) - else: - sum_wts = 0.0 - prev_i = -1 - for i in range(len(s)): - if s.iat[i] == s.iat[i]: - if prev_i == -1: - w.iat[i] = 1.0 - else: - w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, i - prev_i) - sum_wts += w.iat[i] - prev_i = i - return w - - def _variance_debiasing_factors(s, com, adjust, ignore_na): - weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) - cum_sum = weights.cumsum().fillna(method="ffill") - cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill") - 
numerator = cum_sum * cum_sum - denominator = numerator - cum_sum_sq - denominator[denominator <= 0.0] = np.nan - return numerator / denominator - - def _ewma(s, com, min_periods, adjust, ignore_na): - weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) - result = ( - s.multiply(weights) - .cumsum() - .divide(weights.cumsum()) - .fillna(method="ffill") - ) - result[ - s.expanding().count() < (max(min_periods, 1) if min_periods else 1) - ] = np.nan - return result - - com = 3.0 - # test consistency between different ewm* moments - self._test_moments_consistency( - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).mean(), - mock_mean=lambda x: _ewma( - x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ), - corr=lambda x, y: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).corr(y), - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - std_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=False) - ), - cov_unbiased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - std_biased=lambda x: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=True), - cov_biased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=True) - ), - var_debiasing_factors=lambda x: ( - _variance_debiasing_factors( - x, com=com, adjust=adjust, ignore_na=ignore_na - ) - ), - ) - - @pytest.mark.slow - @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) - def test_expanding_consistency(self, min_periods): - - # suppress warnings about empty slices, as we are deliberately testing - # with empty/0-length Series/DataFrames - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - message=".*(empty slice|0 for slice).*", - category=RuntimeWarning, - ) - - # test consistency between different expanding_* moments - self._test_moments_consistency( - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.expanding(min_periods=min_periods).mean(), - mock_mean=lambda x: x.expanding(min_periods=min_periods).sum() - / x.expanding().count(), - corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - std_unbiased=lambda x: x.expanding(min_periods=min_periods).std(), - cov_unbiased=lambda x, y: x.expanding(min_periods=min_periods).cov(y), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - std_biased=lambda x: x.expanding(min_periods=min_periods).std(ddof=0), - cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov( - y, ddof=0 - ), - var_debiasing_factors=lambda x: ( - x.expanding().count() - / (x.expanding().count() - 1.0).replace(0.0, np.nan) - ), - ) - - # test consistency between expanding_xyz() and either (a) - # expanding_apply of Series.xyz(), or (b) expanding_apply of - # np.nanxyz() - for (x, is_constant, no_nans) in self.data: - functions = self.base_functions - - # GH 8269 - if no_nans: - functions = self.base_functions + self.no_nan_functions - for 
(f, require_min_periods, name) in functions: - expanding_f = getattr(x.expanding(min_periods=min_periods), name) - - if ( - require_min_periods - and (min_periods is not None) - and (min_periods < require_min_periods) - ): - continue - - if name == "count": - expanding_f_result = expanding_f() - expanding_apply_f_result = x.expanding(min_periods=0).apply( - func=f, raw=True - ) - else: - if name in ["cov", "corr"]: - expanding_f_result = expanding_f(pairwise=False) - else: - expanding_f_result = expanding_f() - expanding_apply_f_result = x.expanding( - min_periods=min_periods - ).apply(func=f, raw=True) - - # GH 9422 - if name in ["sum", "prod"]: - tm.assert_equal(expanding_f_result, expanding_apply_f_result) - - @pytest.mark.slow - @pytest.mark.parametrize( - "window,min_periods,center", list(_rolling_consistency_cases()) - ) - def test_rolling_consistency(self, window, min_periods, center): - - # suppress warnings about empty slices, as we are deliberately testing - # with empty/0-length Series/DataFrames - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - message=".*(empty slice|0 for slice).*", - category=RuntimeWarning, - ) - - # test consistency between different rolling_* moments - self._test_moments_consistency( - min_periods=min_periods, - count=lambda x: (x.rolling(window=window, center=center).count()), - mean=lambda x: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).mean() - ), - mock_mean=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center) - .sum() - .divide( - x.rolling( - window=window, min_periods=min_periods, center=center - ).count() - ) - ), - corr=lambda x, y: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).corr(y) - ), - var_unbiased=lambda x: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).var() - ), - std_unbiased=lambda x: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).std() - ), - cov_unbiased=lambda x, y: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).cov(y) - ), - var_biased=lambda x: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).var(ddof=0) - ), - std_biased=lambda x: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).std(ddof=0) - ), - cov_biased=lambda x, y: ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).cov(y, ddof=0) - ), - var_debiasing_factors=lambda x: ( - x.rolling(window=window, center=center) - .count() - .divide( - (x.rolling(window=window, center=center).count() - 1.0).replace( - 0.0, np.nan - ) - ) - ), - ) - - # test consistency between rolling_xyz() and either (a) - # rolling_apply of Series.xyz(), or (b) rolling_apply of - # np.nanxyz() - for (x, is_constant, no_nans) in self.data: - functions = self.base_functions - - # GH 8269 - if no_nans: - functions = self.base_functions + self.no_nan_functions - for (f, require_min_periods, name) in functions: - rolling_f = getattr( - x.rolling( - window=window, center=center, min_periods=min_periods - ), - name, - ) - - if ( - require_min_periods - and (min_periods is not None) - and (min_periods < require_min_periods) - ): - continue - - if name == "count": - rolling_f_result = rolling_f() - rolling_apply_f_result = x.rolling( - window=window, min_periods=0, center=center - ).apply(func=f, raw=True) - else: - if name in ["cov", "corr"]: - rolling_f_result = rolling_f(pairwise=False) - else: - rolling_f_result = rolling_f() - 
rolling_apply_f_result = x.rolling( - window=window, min_periods=min_periods, center=center - ).apply(func=f, raw=True) - - # GH 9422 - if name in ["sum", "prod"]: - tm.assert_equal(rolling_f_result, rolling_apply_f_result) - - # binary moments - def test_rolling_cov(self): - A = self.series - B = A + randn(len(A)) - - result = A.rolling(window=50, min_periods=25).cov(B) - tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1]) - - def test_rolling_cov_pairwise(self): - self._check_pairwise_moment("rolling", "cov", window=10, min_periods=5) - - def test_rolling_corr(self): - A = self.series - B = A + randn(len(A)) - - result = A.rolling(window=50, min_periods=25).corr(B) - tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1]) - - # test for correct bias correction - a = tm.makeTimeSeries() - b = tm.makeTimeSeries() - a[:5] = np.nan - b[:10] = np.nan - - result = a.rolling(window=len(a), min_periods=1).corr(b) - tm.assert_almost_equal(result[-1], a.corr(b)) - - def test_rolling_corr_pairwise(self): - self._check_pairwise_moment("rolling", "corr", window=10, min_periods=5) - - @pytest.mark.parametrize("window", range(7)) - def test_rolling_corr_with_zero_variance(self, window): - # GH 18430 - s = pd.Series(np.zeros(20)) - other = pd.Series(np.arange(20)) - - assert s.rolling(window=window).corr(other=other).isna().all() - - def _check_pairwise_moment(self, dispatch, name, **kwargs): - def get_result(obj, obj2=None): - return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) - - result = get_result(self.frame) - result = result.loc[(slice(None), 1), 5] - result.index = result.index.droplevel(1) - expected = get_result(self.frame[1], self.frame[5]) - tm.assert_series_equal(result, expected, check_names=False) - - def test_flex_binary_moment(self): - # GH3155 - # don't blow the stack - msg = ( - "arguments to moment function must be of type" - " np.ndarray/Series/DataFrame" - ) - with pytest.raises(TypeError, match=msg): - rwindow._flex_binary_moment(5, 6, None) - - def test_corr_sanity(self): - # GH 3155 - df = DataFrame( - np.array( - [ - [0.87024726, 0.18505595], - [0.64355431, 0.3091617], - [0.92372966, 0.50552513], - [0.00203756, 0.04520709], - [0.84780328, 0.33394331], - [0.78369152, 0.63919667], - ] - ) - ) - - res = df[0].rolling(5, center=True).corr(df[1]) - assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) - - # and some fuzzing - for _ in range(10): - df = DataFrame(np.random.rand(30, 2)) - res = df[0].rolling(5, center=True).corr(df[1]) - try: - assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) - except AssertionError: - print(res) - - @pytest.mark.parametrize("method", ["corr", "cov"]) - def test_flex_binary_frame(self, method): - series = self.frame[1] - - res = getattr(series.rolling(window=10), method)(self.frame) - res2 = getattr(self.frame.rolling(window=10), method)(series) - exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x)) - - tm.assert_frame_equal(res, exp) - tm.assert_frame_equal(res2, exp) - - frame2 = self.frame.copy() - frame2.values[:] = np.random.randn(*frame2.shape) - - res3 = getattr(self.frame.rolling(window=10), method)(frame2) - exp = DataFrame( - { - k: getattr(self.frame[k].rolling(window=10), method)(frame2[k]) - for k in self.frame - } - ) - tm.assert_frame_equal(res3, exp) - - def test_ewmcov(self): - self._check_binary_ew("cov") - - def test_ewmcov_pairwise(self): - self._check_pairwise_moment("ewm", "cov", span=10, min_periods=5) - - def test_ewmcorr(self): - 
self._check_binary_ew("corr") - - def test_ewmcorr_pairwise(self): - self._check_pairwise_moment("ewm", "corr", span=10, min_periods=5) - - def _check_binary_ew(self, name): - def func(A, B, com, **kwargs): - return getattr(A.ewm(com, **kwargs), name)(B) - - A = Series(randn(50), index=np.arange(50)) - B = A[2:] + randn(48) - - A[:10] = np.NaN - B[-10:] = np.NaN - - result = func(A, B, 20, min_periods=5) - assert np.isnan(result.values[:14]).all() - assert not np.isnan(result.values[14:]).any() - - # GH 7898 - for min_periods in (0, 1, 2): - result = func(A, B, 20, min_periods=min_periods) - # binary functions (ewmcov, ewmcorr) with bias=False require at - # least two values - assert np.isnan(result.values[:11]).all() - assert not np.isnan(result.values[11:]).any() - - # check series of length 0 - result = func(Series([]), Series([]), 50, min_periods=min_periods) - tm.assert_series_equal(result, Series([])) - - # check series of length 1 - result = func(Series([1.0]), Series([1.0]), 50, min_periods=min_periods) - tm.assert_series_equal(result, Series([np.NaN])) - - msg = "Input arrays must be of the same type!" - # exception raised is Exception - with pytest.raises(Exception, match=msg): - func(A, randn(50), 20, min_periods=5) - - def test_expanding_apply_args_kwargs(self, raw): - def mean_w_arg(x, const): - return np.mean(x) + const - - df = DataFrame(np.random.rand(20, 3)) - - expected = df.expanding().apply(np.mean, raw=raw) + 20.0 - - result = df.expanding().apply(mean_w_arg, raw=raw, args=(20,)) - tm.assert_frame_equal(result, expected) - - result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20}) - tm.assert_frame_equal(result, expected) - - def test_expanding_corr(self): - A = self.series.dropna() - B = (A + randn(len(A)))[:-5] - - result = A.expanding().corr(B) - - rolling_result = A.rolling(window=len(A), min_periods=1).corr(B) - - tm.assert_almost_equal(rolling_result, result) - - def test_expanding_count(self): - result = self.series.expanding().count() - tm.assert_almost_equal( - result, self.series.rolling(window=len(self.series)).count() - ) - - def test_expanding_quantile(self): - result = self.series.expanding().quantile(0.5) - - rolling_result = self.series.rolling( - window=len(self.series), min_periods=1 - ).quantile(0.5) - - tm.assert_almost_equal(result, rolling_result) - - def test_expanding_cov(self): - A = self.series - B = (A + randn(len(A)))[:-5] - - result = A.expanding().cov(B) - - rolling_result = A.rolling(window=len(A), min_periods=1).cov(B) - - tm.assert_almost_equal(rolling_result, result) - - def test_expanding_cov_pairwise(self): - result = self.frame.expanding().corr() - - rolling_result = self.frame.rolling( - window=len(self.frame), min_periods=1 - ).corr() - - tm.assert_frame_equal(result, rolling_result) - - def test_expanding_corr_pairwise(self): - result = self.frame.expanding().corr() - - rolling_result = self.frame.rolling( - window=len(self.frame), min_periods=1 - ).corr() - tm.assert_frame_equal(result, rolling_result) - - def test_expanding_cov_diff_index(self): - # GH 7512 - s1 = Series([1, 2, 3], index=[0, 1, 2]) - s2 = Series([1, 3], index=[0, 2]) - result = s1.expanding().cov(s2) - expected = Series([None, None, 2.0]) - tm.assert_series_equal(result, expected) - - s2a = Series([1, None, 3], index=[0, 1, 2]) - result = s1.expanding().cov(s2a) - tm.assert_series_equal(result, expected) - - s1 = Series([7, 8, 10], index=[0, 1, 3]) - s2 = Series([7, 9, 10], index=[0, 2, 3]) - result = s1.expanding().cov(s2) - expected = 
Series([None, None, None, 4.5]) - tm.assert_series_equal(result, expected) - - def test_expanding_corr_diff_index(self): - # GH 7512 - s1 = Series([1, 2, 3], index=[0, 1, 2]) - s2 = Series([1, 3], index=[0, 2]) - result = s1.expanding().corr(s2) - expected = Series([None, None, 1.0]) - tm.assert_series_equal(result, expected) - - s2a = Series([1, None, 3], index=[0, 1, 2]) - result = s1.expanding().corr(s2a) - tm.assert_series_equal(result, expected) - - s1 = Series([7, 8, 10], index=[0, 1, 3]) - s2 = Series([7, 9, 10], index=[0, 2, 3]) - result = s1.expanding().corr(s2) - expected = Series([None, None, None, 1.0]) - tm.assert_series_equal(result, expected) - - def test_rolling_cov_diff_length(self): - # GH 7512 - s1 = Series([1, 2, 3], index=[0, 1, 2]) - s2 = Series([1, 3], index=[0, 2]) - result = s1.rolling(window=3, min_periods=2).cov(s2) - expected = Series([None, None, 2.0]) - tm.assert_series_equal(result, expected) - - s2a = Series([1, None, 3], index=[0, 1, 2]) - result = s1.rolling(window=3, min_periods=2).cov(s2a) - tm.assert_series_equal(result, expected) - - def test_rolling_corr_diff_length(self): - # GH 7512 - s1 = Series([1, 2, 3], index=[0, 1, 2]) - s2 = Series([1, 3], index=[0, 2]) - result = s1.rolling(window=3, min_periods=2).corr(s2) - expected = Series([None, None, 1.0]) - tm.assert_series_equal(result, expected) - - s2a = Series([1, None, 3], index=[0, 1, 2]) - result = s1.rolling(window=3, min_periods=2).corr(s2a) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "f", - [ - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=False)), - lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=False)), - lambda x: x.rolling(window=10, min_periods=5).max(), - lambda x: x.rolling(window=10, min_periods=5).min(), - lambda x: x.rolling(window=10, min_periods=5).sum(), - lambda x: x.rolling(window=10, min_periods=5).mean(), - lambda x: x.rolling(window=10, min_periods=5).std(), - lambda x: x.rolling(window=10, min_periods=5).var(), - lambda x: x.rolling(window=10, min_periods=5).skew(), - lambda x: x.rolling(window=10, min_periods=5).kurt(), - lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5), - lambda x: x.rolling(window=10, min_periods=5).median(), - lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), - lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), - ], - ) - def test_rolling_functions_window_non_shrinkage(self, f): - # GH 7764 - s = Series(range(4)) - s_expected = Series(np.nan, index=s.index) - df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"]) - df_expected = DataFrame(np.nan, index=df.index, columns=df.columns) - - try: - s_result = f(s) - tm.assert_series_equal(s_result, s_expected) - - df_result = f(df) - tm.assert_frame_equal(df_result, df_expected) - except (ImportError): - - # scipy needed for rolling_window - pytest.skip("scipy not available") - - def test_rolling_functions_window_non_shrinkage_binary(self): - - # corr/cov return a MI DataFrame - df = DataFrame( - [[1, 5], [3, 2], [3, 9], [-1, 0]], - columns=Index(["A", "B"], name="foo"), - index=Index(range(4), name="bar"), - ) - df_expected = DataFrame( - columns=Index(["A", "B"], name="foo"), - index=pd.MultiIndex.from_product( - [df.index, df.columns], names=["bar", "foo"] - ), - dtype="float64", - ) - functions = [ - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), - lambda x: 
(x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), - ] - for f in functions: - df_result = f(df) - tm.assert_frame_equal(df_result, df_expected) - - def test_moment_functions_zero_length(self): - # GH 8056 - s = Series() - s_expected = s - df1 = DataFrame() - df1_expected = df1 - df2 = DataFrame(columns=["a"]) - df2["a"] = df2["a"].astype("float64") - df2_expected = df2 - - functions = [ - lambda x: x.expanding().count(), - lambda x: x.expanding(min_periods=5).cov(x, pairwise=False), - lambda x: x.expanding(min_periods=5).corr(x, pairwise=False), - lambda x: x.expanding(min_periods=5).max(), - lambda x: x.expanding(min_periods=5).min(), - lambda x: x.expanding(min_periods=5).sum(), - lambda x: x.expanding(min_periods=5).mean(), - lambda x: x.expanding(min_periods=5).std(), - lambda x: x.expanding(min_periods=5).var(), - lambda x: x.expanding(min_periods=5).skew(), - lambda x: x.expanding(min_periods=5).kurt(), - lambda x: x.expanding(min_periods=5).quantile(0.5), - lambda x: x.expanding(min_periods=5).median(), - lambda x: x.expanding(min_periods=5).apply(sum, raw=False), - lambda x: x.expanding(min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(window=10).count(), - lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False), - lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False), - lambda x: x.rolling(window=10, min_periods=5).max(), - lambda x: x.rolling(window=10, min_periods=5).min(), - lambda x: x.rolling(window=10, min_periods=5).sum(), - lambda x: x.rolling(window=10, min_periods=5).mean(), - lambda x: x.rolling(window=10, min_periods=5).std(), - lambda x: x.rolling(window=10, min_periods=5).var(), - lambda x: x.rolling(window=10, min_periods=5).skew(), - lambda x: x.rolling(window=10, min_periods=5).kurt(), - lambda x: x.rolling(window=10, min_periods=5).quantile(0.5), - lambda x: x.rolling(window=10, min_periods=5).median(), - lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), - lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), - ] - for f in functions: - try: - s_result = f(s) - tm.assert_series_equal(s_result, s_expected) - - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) - - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) - except (ImportError): - - # scipy needed for rolling_window - continue - - def test_moment_functions_zero_length_pairwise(self): - - df1 = DataFrame() - df1_expected = df1 - df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) - df2["a"] = df2["a"].astype("float64") - - df1_expected = DataFrame( - index=pd.MultiIndex.from_product([df1.index, df1.columns]), - columns=Index([]), - ) - df2_expected = DataFrame( - index=pd.MultiIndex.from_product( - [df2.index, df2.columns], names=["bar", "foo"] - ), - columns=Index(["a"], name="foo"), - dtype="float64", - ) - - functions = [ - lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)), - lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)), - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), - lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), - ] - for f in functions: - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) - - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) - - def test_expanding_cov_pairwise_diff_length(self): - # GH 7512 - df1 = DataFrame([[1, 5], [3, 2], [3, 9]], 
columns=Index(["A", "B"], name="foo")) - df1a = DataFrame( - [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo") - ) - df2 = DataFrame( - [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo") - ) - df2a = DataFrame( - [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo") - ) - # TODO: xref gh-15826 - # .loc is not preserving the names - result1 = df1.expanding().cov(df2a, pairwise=True).loc[2] - result2 = df1.expanding().cov(df2a, pairwise=True).loc[2] - result3 = df1a.expanding().cov(df2, pairwise=True).loc[2] - result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2] - expected = DataFrame( - [[-3.0, -6.0], [-5.0, -10.0]], - columns=Index(["A", "B"], name="foo"), - index=Index(["X", "Y"], name="foo"), - ) - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected) - tm.assert_frame_equal(result4, expected) - - def test_expanding_corr_pairwise_diff_length(self): - # GH 7512 - df1 = DataFrame( - [[1, 2], [3, 2], [3, 4]], - columns=["A", "B"], - index=Index(range(3), name="bar"), - ) - df1a = DataFrame( - [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"] - ) - df2 = DataFrame( - [[5, 6], [None, None], [2, 1]], - columns=["X", "Y"], - index=Index(range(3), name="bar"), - ) - df2a = DataFrame( - [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"] - ) - result1 = df1.expanding().corr(df2, pairwise=True).loc[2] - result2 = df1.expanding().corr(df2a, pairwise=True).loc[2] - result3 = df1a.expanding().corr(df2, pairwise=True).loc[2] - result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2] - expected = DataFrame( - [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"]) - ) - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected) - tm.assert_frame_equal(result4, expected) - - def test_rolling_skew_edge_cases(self): - - all_nan = Series([np.NaN] * 5) - - # yields all NaN (0 variance) - d = Series([1] * 5) - x = d.rolling(window=5).skew() - tm.assert_series_equal(all_nan, x) - - # yields all NaN (window too small) - d = Series(np.random.randn(5)) - x = d.rolling(window=2).skew() - tm.assert_series_equal(all_nan, x) - - # yields [NaN, NaN, NaN, 0.177994, 1.548824] - d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401]) - expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824]) - x = d.rolling(window=4).skew() - tm.assert_series_equal(expected, x) - - def test_rolling_kurt_edge_cases(self): - - all_nan = Series([np.NaN] * 5) - - # yields all NaN (0 variance) - d = Series([1] * 5) - x = d.rolling(window=5).kurt() - tm.assert_series_equal(all_nan, x) - - # yields all NaN (window too small) - d = Series(np.random.randn(5)) - x = d.rolling(window=3).kurt() - tm.assert_series_equal(all_nan, x) - - # yields [NaN, NaN, NaN, 1.224307, 2.671499] - d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401]) - expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499]) - x = d.rolling(window=4).kurt() - tm.assert_series_equal(expected, x) - - def test_rolling_skew_eq_value_fperr(self): - # #18804 all rolling skew for all equal values should return Nan - a = Series([1.1] * 15).rolling(window=10).skew() - assert np.isnan(a).all() - - def test_rolling_kurt_eq_value_fperr(self): - # #18804 all rolling kurt for all equal values should return Nan - a = Series([1.1] * 15).rolling(window=10).kurt() - assert np.isnan(a).all() - 
- @pytest.mark.parametrize( - "func,static_comp", - [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], - ids=["sum", "mean", "max", "min"], - ) - def test_expanding_func(self, func, static_comp): - def expanding_func(x, min_periods=1, center=False, axis=0): - exp = x.expanding(min_periods=min_periods, center=center, axis=axis) - return getattr(exp, func)() - - self._check_expanding(expanding_func, static_comp, preserve_nan=False) - - def test_expanding_apply(self, raw): - def expanding_mean(x, min_periods=1): - - exp = x.expanding(min_periods=min_periods) - result = exp.apply(lambda x: x.mean(), raw=raw) - return result - - # TODO(jreback), needed to add preserve_nan=False - # here to make this pass - self._check_expanding(expanding_mean, np.mean, preserve_nan=False) - - ser = Series([]) - tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean(), raw=raw)) - - # GH 8080 - s = Series([None, None, None]) - result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw) - expected = Series([1.0, 2.0, 3.0]) - tm.assert_series_equal(result, expected) - - def _check_expanding( - self, - func, - static_comp, - has_min_periods=True, - has_time_rule=True, - preserve_nan=True, - ): - - series_result = func(self.series) - assert isinstance(series_result, Series) - frame_result = func(self.frame) - assert isinstance(frame_result, DataFrame) - - result = func(self.series) - tm.assert_almost_equal(result[10], static_comp(self.series[:11])) - - if preserve_nan: - assert result.iloc[self._nan_locs].isna().all() - - ser = Series(randn(50)) - - if has_min_periods: - result = func(ser, min_periods=30) - assert result[:29].isna().all() - tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - - # min_periods is working correctly - result = func(ser, min_periods=15) - assert isna(result.iloc[13]) - assert notna(result.iloc[14]) - - ser2 = Series(randn(20)) - result = func(ser2, min_periods=5) - assert isna(result[3]) - assert notna(result[4]) - - # min_periods=0 - result0 = func(ser, min_periods=0) - result1 = func(ser, min_periods=1) - tm.assert_almost_equal(result0, result1) - else: - result = func(ser) - tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - - def test_rolling_max_gh6297(self): - """Replicate result expected in GH #6297""" - - indices = [datetime(1975, 1, i) for i in range(1, 6)] - # So that we can have 2 datapoints on one of the days - indices.append(datetime(1975, 1, 3, 6, 0)) - series = Series(range(1, 7), index=indices) - # Use floats instead of ints as values - series = series.map(lambda x: float(x)) - # Sort chronologically - series = series.sort_index() - - expected = Series( - [1.0, 2.0, 6.0, 4.0, 5.0], - index=[datetime(1975, 1, i, 0) for i in range(1, 6)], - ) - x = series.resample("D").max().rolling(window=1).max() - tm.assert_series_equal(expected, x) - - def test_rolling_max_resample(self): - - indices = [datetime(1975, 1, i) for i in range(1, 6)] - # So that we can have 3 datapoints on last day (4, 10, and 20) - indices.append(datetime(1975, 1, 5, 1)) - indices.append(datetime(1975, 1, 5, 2)) - series = Series(list(range(0, 5)) + [10, 20], index=indices) - # Use floats instead of ints as values - series = series.map(lambda x: float(x)) - # Sort chronologically - series = series.sort_index() - - # Default how should be max - expected = Series( - [0.0, 1.0, 2.0, 3.0, 20.0], - index=[datetime(1975, 1, i, 0) for i in range(1, 6)], - ) - x = series.resample("D").max().rolling(window=1).max() - tm.assert_series_equal(expected, 
x) - - # Now specify median (10.0) - expected = Series( - [0.0, 1.0, 2.0, 3.0, 10.0], - index=[datetime(1975, 1, i, 0) for i in range(1, 6)], - ) - x = series.resample("D").median().rolling(window=1).max() - tm.assert_series_equal(expected, x) - - # Now specify mean (4+10+20)/3 - v = (4.0 + 10.0 + 20.0) / 3.0 - expected = Series( - [0.0, 1.0, 2.0, 3.0, v], - index=[datetime(1975, 1, i, 0) for i in range(1, 6)], - ) - x = series.resample("D").mean().rolling(window=1).max() - tm.assert_series_equal(expected, x) - - def test_rolling_min_resample(self): - - indices = [datetime(1975, 1, i) for i in range(1, 6)] - # So that we can have 3 datapoints on last day (4, 10, and 20) - indices.append(datetime(1975, 1, 5, 1)) - indices.append(datetime(1975, 1, 5, 2)) - series = Series(list(range(0, 5)) + [10, 20], index=indices) - # Use floats instead of ints as values - series = series.map(lambda x: float(x)) - # Sort chronologically - series = series.sort_index() - - # Default how should be min - expected = Series( - [0.0, 1.0, 2.0, 3.0, 4.0], - index=[datetime(1975, 1, i, 0) for i in range(1, 6)], - ) - r = series.resample("D").min().rolling(window=1) - tm.assert_series_equal(expected, r.min()) - - def test_rolling_median_resample(self): - - indices = [datetime(1975, 1, i) for i in range(1, 6)] - # So that we can have 3 datapoints on last day (4, 10, and 20) - indices.append(datetime(1975, 1, 5, 1)) - indices.append(datetime(1975, 1, 5, 2)) - series = Series(list(range(0, 5)) + [10, 20], index=indices) - # Use floats instead of ints as values - series = series.map(lambda x: float(x)) - # Sort chronologically - series = series.sort_index() - - # Default how should be median - expected = Series( - [0.0, 1.0, 2.0, 3.0, 10], - index=[datetime(1975, 1, i, 0) for i in range(1, 6)], - ) - x = series.resample("D").median().rolling(window=1).median() - tm.assert_series_equal(expected, x) - - def test_rolling_median_memory_error(self): - # GH11722 - n = 20000 - Series(np.random.randn(n)).rolling(window=2, center=False).median() - Series(np.random.randn(n)).rolling(window=2, center=False).median() - - def test_rolling_min_max_numeric_types(self): - - # GH12373 - types_test = [np.dtype("f{}".format(width)) for width in [4, 8]] - types_test.extend( - [ - np.dtype("{}{}".format(sign, width)) - for width in [1, 2, 4, 8] - for sign in "ui" - ] - ) - for data_type in types_test: - # Just testing that these don't throw exceptions and that - # the return type is float64. 
Other tests will cover quantitative - # correctness - result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max() - assert result.dtypes[0] == np.dtype("f8") - result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min() - assert result.dtypes[0] == np.dtype("f8") - - -class TestGrouperGrouping: - def setup_method(self, method): - self.series = Series(np.arange(10)) - self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) - - def test_mutated(self): - - msg = r"group\(\) got an unexpected keyword argument 'foo'" - with pytest.raises(TypeError, match=msg): - self.frame.groupby("A", foo=1) - - g = self.frame.groupby("A") - assert not g.mutated - g = self.frame.groupby("A", mutated=True) - assert g.mutated - - def test_getitem(self): - g = self.frame.groupby("A") - g_mutated = self.frame.groupby("A", mutated=True) - - expected = g_mutated.B.apply(lambda x: x.rolling(2).mean()) - - result = g.rolling(2).mean().B - tm.assert_series_equal(result, expected) - - result = g.rolling(2).B.mean() - tm.assert_series_equal(result, expected) - - result = g.B.rolling(2).mean() - tm.assert_series_equal(result, expected) - - result = self.frame.B.groupby(self.frame.A).rolling(2).mean() - tm.assert_series_equal(result, expected) - - def test_getitem_multiple(self): - - # GH 13174 - g = self.frame.groupby("A") - r = g.rolling(2) - g_mutated = self.frame.groupby("A", mutated=True) - expected = g_mutated.B.apply(lambda x: x.rolling(2).count()) - - result = r.B.count() - tm.assert_series_equal(result, expected) - - result = r.B.count() - tm.assert_series_equal(result, expected) - - def test_rolling(self): - g = self.frame.groupby("A") - r = g.rolling(window=4) - - for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]: - - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.rolling(4), f)()) - tm.assert_frame_equal(result, expected) - - for f in ["std", "var"]: - result = getattr(r, f)(ddof=1) - expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1)) - tm.assert_frame_equal(result, expected) - - result = r.quantile(0.5) - expected = g.apply(lambda x: x.rolling(4).quantile(0.5)) - tm.assert_frame_equal(result, expected) - - def test_rolling_corr_cov(self): - g = self.frame.groupby("A") - r = g.rolling(window=4) - - for f in ["corr", "cov"]: - result = getattr(r, f)(self.frame) - - def func(x): - return getattr(x.rolling(4), f)(self.frame) - - expected = g.apply(func) - tm.assert_frame_equal(result, expected) - - result = getattr(r.B, f)(pairwise=True) - - def func(x): - return getattr(x.B.rolling(4), f)(pairwise=True) - - expected = g.apply(func) - tm.assert_series_equal(result, expected) - - def test_rolling_apply(self, raw): - g = self.frame.groupby("A") - r = g.rolling(window=4) - - # reduction - result = r.apply(lambda x: x.sum(), raw=raw) - expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) - tm.assert_frame_equal(result, expected) - - def test_rolling_apply_mutability(self): - # GH 14013 - df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) - g = df.groupby("A") - - mi = pd.MultiIndex.from_tuples( - [("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)] - ) - - mi.names = ["A", None] - # Grouped column should not be a part of the output - expected = pd.DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi) - - result = g.rolling(window=2).sum() - tm.assert_frame_equal(result, expected) - - # Call an arbitrary function on the groupby - g.sum() - - # Make sure 
nothing has been mutated - result = g.rolling(window=2).sum() - tm.assert_frame_equal(result, expected) - - def test_expanding(self): - g = self.frame.groupby("A") - r = g.expanding() - - for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]: - - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.expanding(), f)()) - tm.assert_frame_equal(result, expected) - - for f in ["std", "var"]: - result = getattr(r, f)(ddof=0) - expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) - tm.assert_frame_equal(result, expected) - - result = r.quantile(0.5) - expected = g.apply(lambda x: x.expanding().quantile(0.5)) - tm.assert_frame_equal(result, expected) - - def test_expanding_corr_cov(self): - g = self.frame.groupby("A") - r = g.expanding() - - for f in ["corr", "cov"]: - result = getattr(r, f)(self.frame) - - def func(x): - return getattr(x.expanding(), f)(self.frame) - - expected = g.apply(func) - tm.assert_frame_equal(result, expected) - - result = getattr(r.B, f)(pairwise=True) - - def func(x): - return getattr(x.B.expanding(), f)(pairwise=True) - - expected = g.apply(func) - tm.assert_series_equal(result, expected) - - def test_expanding_apply(self, raw): - g = self.frame.groupby("A") - r = g.expanding() - - # reduction - result = r.apply(lambda x: x.sum(), raw=raw) - expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) - tm.assert_frame_equal(result, expected)
closes #19228 xref #26807 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] passes `black pandas` Just split the code into files by classes. No changes were made to the code. cc @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/27348
2019-07-11T21:43:48Z
2019-07-12T14:29:38Z
2019-07-12T14:29:37Z
2019-07-12T18:45:35Z
Fix _can_hold_element for datetimelike blocks
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 6a681954fd902..bea73d72b91c9 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -574,7 +574,7 @@ def is_valid_nat_for_dtype(obj, dtype): ------- bool """ - if not isna(obj): + if not lib.is_scalar(obj) or not isna(obj): return False if dtype.kind == "M": return not isinstance(obj, np.timedelta64) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 24e0a7fbad0a5..e02fecf0ef114 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -9,7 +9,8 @@ from pandas._libs import NaT, lib, tslib, tslibs import pandas._libs.internals as libinternals -from pandas._libs.tslibs import Timedelta, conversion, is_null_datetimelike +from pandas._libs.tslibs import Timedelta, conversion +from pandas._libs.tslibs.timezones import tz_compare from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -60,7 +61,13 @@ ABCPandasArray, ABCSeries, ) -from pandas.core.dtypes.missing import _isna_compat, array_equivalent, isna, notna +from pandas.core.dtypes.missing import ( + _isna_compat, + array_equivalent, + is_valid_nat_for_dtype, + isna, + notna, +) import pandas.core.algorithms as algos from pandas.core.arrays import ( @@ -2248,14 +2255,17 @@ def _astype(self, dtype, **kwargs): def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: - return tipo == _NS_DTYPE or tipo == np.int64 + return is_dtype_equal(tipo, self.dtype) + elif element is NaT: + return True elif isinstance(element, datetime): + if self.is_datetimetz: + return tz_compare(element.tzinfo, self.dtype.tz) return element.tzinfo is None elif is_integer(element): return element == tslibs.iNaT - # TODO: shouldnt we exclude timedelta64("NaT")? See GH#27297 - return isna(element) + return is_valid_nat_for_dtype(element, self.dtype) def _coerce_values(self, values): return values.view("i8") @@ -2275,8 +2285,10 @@ def _try_coerce_args(self, other): ------- base-type other """ - if is_null_datetimelike(other): + if is_valid_nat_for_dtype(other, self.dtype): other = tslibs.iNaT + elif is_integer(other) and other == tslibs.iNaT: + pass elif isinstance(other, (datetime, np.datetime64, date)): other = self._box_func(other) if getattr(other, "tz") is not None: @@ -2359,6 +2371,8 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): is_datetimetz = True is_extension = True + _can_hold_element = DatetimeBlock._can_hold_element + @property def _holder(self): return DatetimeArray @@ -2465,8 +2479,10 @@ def _try_coerce_args(self, other): # add the tz back other = self._holder(other, dtype=self.dtype) - elif is_null_datetimelike(other): + elif is_valid_nat_for_dtype(other, self.dtype): other = tslibs.iNaT + elif is_integer(other) and other == tslibs.iNaT: + pass elif isinstance(other, self._holder): if other.tz != self.values.tz: raise ValueError("incompatible or non tz-aware value") @@ -2606,10 +2622,16 @@ def _box_func(self): def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: + # TODO: remove the np.int64 support once coerce_values and + # _try_coerce_args both coerce to m8[ns] and not i8. 
return issubclass(tipo.type, (np.timedelta64, np.int64)) elif element is NaT: return True - return is_integer(element) or isinstance(element, (timedelta, np.timedelta64)) + elif isinstance(element, (timedelta, np.timedelta64)): + return True + elif is_integer(element): + return element == tslibs.iNaT + return is_valid_nat_for_dtype(element, self.dtype) def fillna(self, value, **kwargs): @@ -2645,8 +2667,10 @@ def _try_coerce_args(self, other): base-type other """ - if is_null_datetimelike(other): + if is_valid_nat_for_dtype(other, self.dtype): other = tslibs.iNaT + elif is_integer(other) and other == tslibs.iNaT: + pass elif isinstance(other, (timedelta, np.timedelta64)): other = Timedelta(other).value elif hasattr(other, "dtype") and is_timedelta64_dtype(other):
I've got a bunch of branches going that touch this; it's easier to just get it right once.
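For context, here is a minimal user-level sketch (illustrative only, not taken from the PR) of why a dtype-aware NaT check matters: both NaT variants register as missing, but only one of them is valid for a given datetimelike dtype.

```python
import numpy as np
import pandas as pd

# Both scalars are "NaT-like" and count as missing values...
dt_nat = np.datetime64("NaT")
td_nat = np.timedelta64("NaT")
print(pd.isna(dt_nat), pd.isna(td_nat))  # True True

# ...but they have different dtype kinds ('M' vs 'm'), so a
# datetime64 block should accept the first and reject the second.
print(dt_nat.dtype.kind, td_nat.dtype.kind)  # M m
```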
https://api.github.com/repos/pandas-dev/pandas/pulls/27347
2019-07-11T20:48:41Z
2019-07-15T20:13:03Z
2019-07-15T20:13:03Z
2019-07-15T21:06:19Z
CLN: revisit build warnings in cython templates
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 8e351244b7f43..000689f634545 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -76,7 +76,11 @@ def group_last_{{name}}({{c_type}}[:, :] out, val = values[i, j] # not nan - if val == val and val != {{nan_val}}: + if ( + {{if not name.startswith("int")}} + val == val and + {{endif}} + val != {{nan_val}}): nobs[lab, j] += 1 resx[lab, j] = val @@ -133,7 +137,11 @@ def group_nth_{{name}}({{c_type}}[:, :] out, val = values[i, j] # not nan - if val == val and val != {{nan_val}}: + if ( + {{if not name.startswith("int")}} + val == val and + {{endif}} + val != {{nan_val}}): nobs[lab, j] += 1 if nobs[lab, j] == rank: resx[lab, j] = val diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index bf2189a8c1fd7..17f1d011af01b 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -424,8 +424,12 @@ cdef class {{name}}HashTable(HashTable): for i in range(n): val = values[i] - if ignore_na and (val != val - or (use_na_value and val == na_value2)): + if ignore_na and ( + {{if not name.lower().startswith(("uint", "int"))}} + val != val or + {{endif}} + (use_na_value and val == na_value2) + ): # if missing values do not count as unique values (i.e. if # ignore_na is True), skip the hashtable entry for them, # and replace the corresponding label with na_sentinel
- [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Retrying dropped portions from #27157, which amend the cython templates to deal with special cases based on type. This PR silences compiler warnings about `x==x` being useless in the case of ints, though not floats (since `nan!=nan`). In https://github.com/pandas-dev/pandas/pull/27157#discussion_r299103166 these were rejected because there wasn't agreement about how this should be done and a separate discussion was requested. Annoyingly, these changes agree with what already exists in these files (in multiple places): https://github.com/pandas-dev/pandas/blob/355e322b5ed4a31c9e1cab9652510e85a54f23a3/pandas/_libs/groupby_helper.pxi.in#L212-L216 But the file is already inconsistent in how these type checks are done: https://github.com/pandas-dev/pandas/blob/355e322b5ed4a31c9e1cab9652510e85a54f23a3/pandas/_libs/groupby_helper.pxi.in#L391 Making a decision here is also required to get rid of the warnings mentioned in https://github.com/pandas-dev/pandas/issues/27169: in that case, fused types that mix signed/unsigned types trigger a warning when the code later includes comparisons with a signed quantity. So there too, type checks in the template are the obvious fix. cc @WillAyd, @jreback
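A plain NumPy sketch of the reasoning (illustrative only): for floats, `val == val` is an idiomatic NaN check, but for integers the comparison is always true, which is exactly why the C compiler warns about it in the integer instantiations of the template.

```python
import numpy as np

fval = np.float64("nan")
ival = np.int64(0)

# NaN is the only float value not equal to itself, so `val == val`
# doubles as a "val is not NaN" check for float types.
print(fval == fval)  # False

# Integers have no NaN, so the same comparison is unconditionally
# True -- dead code that the compiler rightly flags.
print(ival == ival)  # True
```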
https://api.github.com/repos/pandas-dev/pandas/pulls/27346
2019-07-11T19:34:25Z
2019-07-12T14:43:47Z
2019-07-12T14:43:46Z
2019-07-31T16:19:41Z
Accept empty dataframes in DataFrame.to_parquet
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index ebe8b4770f6aa..f85449bf206e4 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1101,6 +1101,7 @@ I/O - Fixed bug in :func:`DataFrame.to_excel()` where custom objects (i.e. `PeriodIndex`) inside merged cells were not being converted into types safe for the Excel writer (:issue:`27006`) - Bug in :meth:`read_hdf` where reading a timezone aware :class:`DatetimeIndex` would raise a ``TypeError`` (:issue:`11926`) - Bug in :meth:`to_msgpack` and :meth:`read_msgpack` which would raise a ``ValueError`` rather than a ``FileNotFoundError`` for an invalid path (:issue:`27160`) +- Fixed bug in :meth:`DataFrame.to_parquet` which would raise a ``ValueError`` when the dataframe had no columns (:issue:`27339`) Plotting ^^^^^^^^ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 3db05b94e5dce..a2502df45169f 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -55,7 +55,7 @@ def validate_dataframe(df): raise ValueError("to_parquet only supports IO with DataFrames") # must have value column names (strings only) - if df.columns.inferred_type not in {"string", "unicode"}: + if df.columns.inferred_type not in {"string", "unicode", "empty"}: raise ValueError("parquet must have string column names") # index level names must be strings diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 6ac2e9cd65a27..a04fb9fd50257 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -471,6 +471,11 @@ def test_partition_cols_supported(self, pa, df_full): assert len(dataset.partitions.partition_names) == 2 assert dataset.partitions.partition_names == set(partition_cols) + def test_empty_dataframe(self, pa): + # GH #27339 + df = pd.DataFrame() + check_round_trip(df, pa) + class TestParquetFastParquet(Base): @td.skip_if_no("fastparquet", min_version="0.2.1") @@ -566,3 +571,10 @@ def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full): partition_on=partition_cols, partition_cols=partition_cols, ) + + def test_empty_dataframe(self, fp): + # GH #27339 + df = pd.DataFrame() + expected = df.copy() + expected.index.name = "index" + check_round_trip(df, fp, expected=expected)
Fixes #27339 I wrote two tests because `fastparquet` adds a name to the index when it deserializes. - [x] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
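A minimal round-trip sketch of what the new tests cover (the file name is illustrative, and both engines are assumed to be installed): the empty frame now serializes instead of raising, and fastparquet restores it with the index named `"index"`.

```python
import pandas as pd

df = pd.DataFrame()  # no columns: columns.inferred_type is "empty"

# Previously raised ValueError("parquet must have string column names")
df.to_parquet("empty.parquet", engine="pyarrow")
result = pd.read_parquet("empty.parquet", engine="pyarrow")

# The fastparquet engine round-trips too, but names the restored
# index "index" -- hence the adjusted expectation in its test.
```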
https://api.github.com/repos/pandas-dev/pandas/pulls/27341
2019-07-11T17:08:09Z
2019-07-11T19:14:50Z
2019-07-11T19:14:50Z
2019-07-11T20:07:45Z
CLN: remove Hypothesis warning during test
diff --git a/pandas/conftest.py b/pandas/conftest.py index ef2758d263e1a..2cf7bf6a6df41 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -25,7 +25,6 @@ # if it really is slow add `@settings(deadline=...)` with a working value, # or `deadline=None` to entirely disable timeouts for that test. deadline=500, - timeout=hypothesis.unlimited, suppress_health_check=(hypothesis.HealthCheck.too_slow,), ) hypothesis.settings.load_profile("ci")
```plain hypothesis\_settings.py:193: HypothesisDeprecationWarning: The timeout setting can safely be removed with no effect. ``` Starting with 4.0.0, released in January, the timeout setting does [nothing](https://github.com/HypothesisWorks/hypothesis/commit/2b9506b7120c2f0593539c1e5d9b526d7ec1377f). https://hypothesis.readthedocs.io/en/latest/settings.html#hypothesis.settings.timeout ```plain The timeout setting has been deprecated and no longer does anything. default value: not_set The timeout setting can safely be removed with no effect. ``` Bump the dependency from `>=3.8.2` and remove the setting.
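For tests that genuinely are slow, the per-test `deadline` setting is the replacement for the removed global timeout, as the comment in the diff suggests. A sketch:

```python
from hypothesis import given, settings, strategies as st

# Relax (or disable) the deadline for one slow test instead of
# relying on the deprecated global `timeout` setting.
@settings(deadline=None)
@given(st.integers())
def test_something_slow(x):
    assert x == x
```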
https://api.github.com/repos/pandas-dev/pandas/pulls/27336
2019-07-11T08:52:59Z
2019-07-11T21:01:00Z
2019-07-11T21:01:00Z
2019-07-11T21:17:30Z
ENH: Use IntegerArray to avoid forced conversion from integer to float
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index a14efd3313eaf..5b27c953450e3 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -971,6 +971,7 @@ cdef class Seen: bint nat_ # seen nat bint bool_ # seen_bool bint null_ # seen_null + bint nan_ # seen_np.nan bint uint_ # seen_uint (unsigned integer) bint sint_ # seen_sint (signed integer) bint float_ # seen_float @@ -995,6 +996,7 @@ cdef class Seen: self.nat_ = 0 self.bool_ = 0 self.null_ = 0 + self.nan_ = 0 self.uint_ = 0 self.sint_ = 0 self.float_ = 0 @@ -1956,10 +1958,37 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, @cython.wraparound(False) def maybe_convert_objects(ndarray[object] objects, bint try_float=0, bint safe=0, bint convert_datetime=0, - bint convert_timedelta=0): + bint convert_timedelta=0, + bint convert_to_nullable_integer=0): """ Type inference function-- convert object array to proper dtype + + Parameters + ---------- + values : ndarray + Array of object elements to convert. + try_float : bool, default False + If an array-like object contains only float or NaN values is + encountered, whether to convert and return an array of float dtype. + safe : bool, default False + Whether to upcast numeric type (e.g. int cast to float). If set to + True, no upcasting will be performed. + convert_datetime : bool, default False + If an array-like object contains only datetime values or NaT is + encountered, whether to convert and return an array of M8[ns] dtype. + convert_timedelta : bool, default False + If an array-like object contains only timedelta values or NaT is + encountered, whether to convert and return an array of m8[ns] dtype. + convert_to_nullable_integer : bool, default False + If an array-like object contains only interger values (and NaN) is + encountered, whether to convert and return an IntegerArray. 
+ + Returns + ------- + array : array of converted object values to more specific dtypes if + pplicable """ + cdef: Py_ssize_t i, n ndarray[float64_t] floats @@ -1980,6 +2009,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, ints = np.empty(n, dtype='i8') uints = np.empty(n, dtype='u8') bools = np.empty(n, dtype=np.uint8) + mask = np.full(n, False) if convert_datetime: datetimes = np.empty(n, dtype='M8[ns]') @@ -1997,6 +2027,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, if val is None: seen.null_ = 1 floats[i] = complexes[i] = fnan + mask[i] = True elif val is NaT: seen.nat_ = 1 if convert_datetime: @@ -2006,6 +2037,10 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, if not (convert_datetime or convert_timedelta): seen.object_ = 1 break + elif val is np.nan: + seen.nan_ = 1 + mask[i] = True + floats[i] = complexes[i] = val elif util.is_bool_object(val): seen.bool_ = 1 bools[i] = val @@ -2087,11 +2122,19 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, if not seen.object_: if not safe: - if seen.null_: + if seen.null_ or seen.nan_: if seen.is_float_or_complex: if seen.complex_: return complexes - elif seen.float_ or seen.int_: + elif seen.float_: + return floats + elif seen.int_: + if convert_to_nullable_integer: + from pandas.core.arrays import IntegerArray + return IntegerArray(ints, mask) + else: + return floats + elif seen.nan_: return floats else: if not seen.bool_: @@ -2130,7 +2173,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, if seen.complex_: if not seen.int_: return complexes - elif seen.float_: + elif seen.float_ or seen.nan_: if not seen.int_: return floats else: @@ -2154,7 +2197,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, if seen.complex_: if not seen.int_: return complexes - elif seen.float_: + elif seen.float_ or seen.nan_: if not seen.int_: return floats elif seen.int_: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7e3c2200dbabc..c5b6995e5d3bd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -857,9 +857,9 @@ def style(self): ... index=['panda', 'polar', 'koala']) >>> df species population - panda bear 1864 - polar bear 22000 - koala marsupial 80000 + panda bear 1864 + polar bear 22000 + koala marsupial 80000 >>> for label, content in df.items(): ... print('label:', label) ... 
print('content:', content, sep='\n') diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 05a2803b3fc2f..bb62db431ac73 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -548,6 +548,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): tuple arrays, columns """ + if columns is None: gen = (list(x.keys()) for x in data) types = (dict, OrderedDict) if PY36 else OrderedDict diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 62fb118f719e3..0408c78ac1536 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -51,6 +51,7 @@ Timestamp, isna, ) +from pandas.core.arrays import IntegerArray import pandas.util.testing as tm @@ -552,6 +553,20 @@ def test_maybe_convert_objects_datetime(self): out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1) tm.assert_numpy_array_equal(out, exp) + @pytest.mark.parametrize( + "exp", + [ + IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])), + IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])), + ], + ) + def test_maybe_convert_objects_nullable_integer(self, exp): + # GH27335 + arr = np.array([2, np.NaN], dtype=object) + result = lib.maybe_convert_objects(arr, convert_to_nullable_integer=1) + + tm.assert_extension_array_equal(result, exp) + def test_mixed_dtypes_remain_object_array(self): # GH14956 array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
- [ ] closes #xxxx - [x] tests added / passed - [ ] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is a new attempt to solve issue #16918, following the earlier attempt in PR #27073.
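The motivating behavior, sketched at the user level (the new `convert_to_nullable_integer` flag itself is internal to `lib.maybe_convert_objects`): without a nullable integer type, a single missing value forces the whole column to float64.

```python
import numpy as np
import pandas as pd

# Classic upcast: NaN has no int64 representation, so the values
# are silently converted to float64.
s = pd.Series([1, 2, np.nan])
print(s.dtype)  # float64

# The nullable extension type keeps the integers intact and tracks
# missingness in a separate boolean mask (what IntegerArray stores).
s2 = pd.Series([1, 2, np.nan], dtype="Int64")
print(s2.dtype)  # Int64
```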
https://api.github.com/repos/pandas-dev/pandas/pulls/27335
2019-07-11T04:56:41Z
2019-11-13T02:06:33Z
2019-11-13T02:06:33Z
2022-04-06T11:25:14Z
CLN: requested follow-ups
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4e05dfca43e78..0e2253aed1c88 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -274,7 +274,6 @@ def _setup_axes( info_axis=None, stat_axis=None, aliases=None, - slicers=None, axes_are_reversed=False, build_axes=True, ns=None, @@ -288,7 +287,6 @@ def _setup_axes( info_axis_num : the axis of the selector dimension (int) stat_axis_num : the number of axis for the default stats (int) aliases : other names for a single axis (dict) - slicers : how axes slice to others (dict) axes_are_reversed : boolean whether to treat passed axes as reversed (DataFrame) build_axes : setup the axis properties (default True) @@ -300,7 +298,6 @@ def _setup_axes( cls._AXIS_ALIASES = aliases or dict() cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()} cls._AXIS_NAMES = dict(enumerate(axes)) - cls._AXIS_SLICEMAP = slicers or None cls._AXIS_REVERSED = axes_are_reversed # typ @@ -347,15 +344,6 @@ def _construct_axes_dict_from(self, axes, **kwargs): d.update(kwargs) return d - def _construct_axes_dict_for_slice(self, axes=None, **kwargs): - """Return an axes dictionary for myself.""" - d = { - self._AXIS_SLICEMAP[a]: self._get_axis(a) - for a in (axes or self._AXIS_ORDERS) - } - d.update(kwargs) - return d - def _construct_axes_from_arguments( self, args, kwargs, require_all=False, sentinel=None ): @@ -577,18 +565,6 @@ def _obj_with_exclusions(self): """ internal compat with SelectionMixin """ return self - def _expand_axes(self, key): - new_axes = [] - for k, ax in zip(key, self.axes): - if k not in ax: - if type(k) != ax.dtype.type: - ax = ax.astype("O") - new_axes.append(ax.insert(len(ax), k)) - else: - new_axes.append(ax) - - return new_axes - def set_axis(self, labels, axis=0, inplace=None): """ Assign desired index to given axis. diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index 7b0030b91e4dc..70c48e969172f 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -36,9 +36,19 @@ def is_scalar_indexer(indexer, arr_value) -> bool: return False -def is_empty_indexer(indexer, arr_value) -> bool: - # return a boolean if we have an empty indexer +def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool: + """ + Check if we have an empty indexer. 
+ + Parameters + ---------- + indexer : object + arr_value : np.ndarray + Returns + ------- + bool + """ if is_list_like(indexer) and not len(indexer): return True if arr_value.ndim == 1: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c31d6538ad2c3..01f338a021cec 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -158,9 +158,7 @@ def _get_label(self, label, axis=None): return self.obj._xs(label, axis=axis) - def _get_loc(self, key, axis=None): - if axis is None: - axis = self.axis + def _get_loc(self, key, axis: int): return self.obj._ixs(key, axis=axis) def _slice(self, obj, axis=None, kind=None): @@ -172,11 +170,11 @@ def _get_setitem_indexer(self, key): if self.axis is not None: return self._convert_tuple(key, is_setter=True) - axis = self.obj._get_axis(0) + ax = self.obj._get_axis(0) - if isinstance(axis, MultiIndex) and self.name != "iloc": + if isinstance(ax, MultiIndex) and self.name != "iloc": try: - return axis.get_loc(key) + return ax.get_loc(key) except Exception: pass @@ -189,8 +187,9 @@ def _get_setitem_indexer(self, key): if isinstance(key, range): return self._convert_range(key, is_setter=True) + axis = self.axis or 0 try: - return self._convert_to_indexer(key, is_setter=True) + return self._convert_to_indexer(key, axis=axis, is_setter=True) except TypeError as e: # invalid indexer type vs 'other' indexing errors @@ -206,7 +205,7 @@ def __setitem__(self, key, value): indexer = self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) - def _validate_key(self, key, axis): + def _validate_key(self, key, axis: int): """ Ensure that key is valid for current indexer. @@ -214,7 +213,6 @@ def _validate_key(self, key, axis): ---------- key : scalar, slice or list-like The key requested - axis : int Dimension on which the indexing is being made @@ -222,14 +220,12 @@ def _validate_key(self, key, axis): ------ TypeError If the key (or some element of it) has wrong type - IndexError If the key (or some element of it) is out of bounds - KeyError If the key was not found """ - raise AbstractMethodError() + raise AbstractMethodError(self) def _has_valid_tuple(self, key): """ check the key for valid keys across my indexer """ @@ -249,7 +245,7 @@ def _is_nested_tuple_indexer(self, tup): return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) return False - def _convert_tuple(self, key, is_setter=False): + def _convert_tuple(self, key, is_setter: bool = False): keyidx = [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) @@ -268,19 +264,17 @@ def _convert_tuple(self, key, is_setter=False): keyidx.append(idx) return tuple(keyidx) - def _convert_range(self, key, is_setter=False): + def _convert_range(self, key, is_setter: bool = False): """ convert a range argument """ return list(key) - def _convert_scalar_indexer(self, key, axis): + def _convert_scalar_indexer(self, key, axis: int): # if we are accessing via lowered dim, use the last dim - if axis is None: - axis = 0 ax = self.obj._get_axis(min(axis, self.ndim - 1)) # a scalar return ax._convert_scalar_indexer(key, kind=self.name) - def _convert_slice_indexer(self, key, axis): + def _convert_slice_indexer(self, key, axis: int): # if we are accessing via lowered dim, use the last dim ax = self.obj._get_axis(min(axis, self.ndim - 1)) return ax._convert_slice_indexer(key, kind=self.name) @@ -883,7 +877,7 @@ def _multi_take(self, tup): } return o._reindex_with_indexers(d, copy=True, allow_dups=True) - def _convert_for_reindex(self, key, axis=None): + def 
_convert_for_reindex(self, key, axis: int): return key def _handle_lowerdim_multi_index_axis0(self, tup): @@ -1055,7 +1049,7 @@ def _getitem_axis(self, key, axis=None): return self._get_label(key, axis=axis) - def _get_listlike_indexer(self, key, axis, raise_missing=False): + def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): """ Transform a list-like of keys into a new index and an indexer. @@ -1151,7 +1145,9 @@ def _getitem_iterable(self, key, axis: int): {axis: [keyarr, indexer]}, copy=True, allow_dups=True ) - def _validate_read_indexer(self, key, indexer, axis, raise_missing=False): + def _validate_read_indexer( + self, key, indexer, axis: int, raise_missing: bool = False + ): """ Check that indexer can be used to return a result (e.g. at least one element was found, unless the list of keys was actually empty). @@ -1216,7 +1212,9 @@ def _validate_read_indexer(self, key, indexer, axis, raise_missing=False): if not (ax.is_categorical() or ax.is_interval()): warnings.warn(_missing_key_warning, FutureWarning, stacklevel=6) - def _convert_to_indexer(self, obj, axis=None, is_setter=False, raise_missing=False): + def _convert_to_indexer( + self, obj, axis: int, is_setter: bool = False, raise_missing: bool = False + ): """ Convert indexing key into something we can use to do actual fancy indexing on an ndarray @@ -1231,9 +1229,6 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False, raise_missing=Fal raise AmbiguousIndexError with integer labels? - No, prefer label-based indexing """ - if axis is None: - axis = self.axis or 0 - labels = self.obj._get_axis(axis) if isinstance(obj, slice): @@ -1362,7 +1357,7 @@ def __init__(self, name, obj): super().__init__(name, obj) @Appender(_NDFrameIndexer._validate_key.__doc__) - def _validate_key(self, key, axis): + def _validate_key(self, key, axis: int): if isinstance(key, slice): return True @@ -1378,7 +1373,7 @@ def _validate_key(self, key, axis): return True - def _convert_for_reindex(self, key, axis=None): + def _convert_for_reindex(self, key, axis: int): """ Transform a list of keys into a new array ready to be used as axis of the object we return (e.g. including NaNs). 
@@ -1394,9 +1389,6 @@ def _convert_for_reindex(self, key, axis=None): ------- list-like of labels """ - - if axis is None: - axis = self.axis or 0 labels = self.obj._get_axis(axis) if com.is_bool_indexer(key): @@ -1726,7 +1718,7 @@ class _LocIndexer(_LocationIndexer): _exception = KeyError @Appender(_NDFrameIndexer._validate_key.__doc__) - def _validate_key(self, key, axis): + def _validate_key(self, key, axis: int): # valid for a collection of labels (we check their presence later) # slice of labels (where start-end in labels) @@ -2006,7 +1998,7 @@ class _iLocIndexer(_LocationIndexer): _exception = IndexError _get_slice_axis = _NDFrameIndexer._get_slice_axis - def _validate_key(self, key, axis): + def _validate_key(self, key, axis: int): if com.is_bool_indexer(key): if hasattr(key, "index") and isinstance(key.index, Index): if key.index.inferred_type == "integer": @@ -2132,7 +2124,7 @@ def _getitem_tuple(self, tup): return retval - def _get_list_axis(self, key, axis=None): + def _get_list_axis(self, key, axis: int): """ Return Series values by list or array of integers @@ -2145,8 +2137,6 @@ def _get_list_axis(self, key, axis=None): ------- Series object """ - if axis is None: - axis = self.axis or 0 try: return self.obj._take(key, axis=axis) except IndexError: @@ -2184,10 +2174,11 @@ def _getitem_axis(self, key, axis=None): return self._get_loc(key, axis=axis) - def _convert_to_indexer(self, obj, axis=None, is_setter=False): + # raise_missing is included for compat with the parent class signature + def _convert_to_indexer( + self, obj, axis: int, is_setter: bool = False, raise_missing: bool = False + ): """ much simpler as we only have to deal with our valid types """ - if axis is None: - axis = self.axis or 0 # make need to convert a float key if isinstance(obj, slice): @@ -2209,7 +2200,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): class _ScalarAccessIndexer(_NDFrameIndexer): """ access scalars quickly """ - def _convert_key(self, key, is_setter=False): + def _convert_key(self, key, is_setter: bool = False): return list(key) def __getitem__(self, key): @@ -2289,7 +2280,7 @@ class _AtIndexer(_ScalarAccessIndexer): _takeable = False - def _convert_key(self, key, is_setter=False): + def _convert_key(self, key, is_setter: bool = False): """ require they keys to be the same type as the index (so we don't fallback) """ @@ -2366,7 +2357,7 @@ class _iAtIndexer(_ScalarAccessIndexer): def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) - def _convert_key(self, key, is_setter=False): + def _convert_key(self, key, is_setter: bool = False): """ require integer args (and convert to label arguments) """ for a, i in zip(self.obj.axes, key): if not is_integer(i): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 1e84437f5c2fc..f931df25c4fd5 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2249,9 +2249,9 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return tipo == _NS_DTYPE or tipo == np.int64 - if isinstance(element, datetime): + elif isinstance(element, datetime): return element.tzinfo is None - if is_integer(element): + elif is_integer(element): return element == tslibs.iNaT # TODO: shouldnt we exclude timedelta64("NaT")? 
See GH#27297 @@ -2607,7 +2607,7 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, (np.timedelta64, np.int64)) - if element is NaT: + elif element is NaT: return True return is_integer(element) or isinstance( element, (timedelta, np.timedelta64, np.int64) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 9206463e18fb3..1db177d792401 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3974,6 +3974,7 @@ def process_filter(field, filt): for axis_name in obj._AXIS_NAMES.values(): axis_number = obj._get_axis_number(axis_name) axis_values = obj._get_axis(axis_name) + assert axis_number is not None # see if the field is the name of an axis if field == axis_name: diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 1fbecbab469e4..2baee8bbe27a6 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -881,13 +881,13 @@ def check(series, other): _check_op(series, other, operator.pow, pos_only=True) - _check_op(series, other, lambda x, y: operator.add(y, x)) - _check_op(series, other, lambda x, y: operator.sub(y, x)) - _check_op(series, other, lambda x, y: operator.truediv(y, x)) - _check_op(series, other, lambda x, y: operator.floordiv(y, x)) - _check_op(series, other, lambda x, y: operator.mul(y, x)) - _check_op(series, other, lambda x, y: operator.pow(y, x), pos_only=True) - _check_op(series, other, lambda x, y: operator.mod(y, x)) + _check_op(series, other, ops.radd) + _check_op(series, other, ops.rsub) + _check_op(series, other, ops.rtruediv) + _check_op(series, other, ops.rfloordiv) + _check_op(series, other, ops.rmul) + _check_op(series, other, ops.rpow, pos_only=True) + _check_op(series, other, ops.rmod) tser = tm.makeTimeSeries().rename("ts") check(tser, tser * 2) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 3c102f49c6cbf..ae24ad65d2c56 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -1680,6 +1680,7 @@ def test_setitem_single_column_mixed_datetime(self): df.loc["d", :] = np.nan assert not isna(df.loc["c", :]).all() + # FIXME: don't leave commented-out # as of GH 3216 this will now work! 
# try to set with a list like item # pytest.raises( diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 0c25df7997469..062c07cb6242a 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -692,10 +692,7 @@ def test_operators_corner(self): ) tm.assert_series_equal(added[:-5], expected) - pairings = [ - (Series.div, operator.truediv, 1), - (Series.rdiv, lambda x, y: operator.truediv(y, x), 1), - ] + pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)] for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]: fv = 0 lop = getattr(Series, op) diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index ad4c898b004ac..899752cde54ed 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -12,6 +12,7 @@ import pandas as pd from pandas import DataFrame, Series, SparseDtype, SparseSeries, bdate_range, isna +from pandas.core import ops from pandas.core.reshape.util import cartesian_product import pandas.core.sparse.frame as spf from pandas.tests.series.test_api import SharedWithSparse @@ -572,16 +573,16 @@ def check(a, b): _check_op(a, b, operator.floordiv) _check_op(a, b, operator.mul) - _check_op(a, b, lambda x, y: operator.add(y, x)) - _check_op(a, b, lambda x, y: operator.sub(y, x)) - _check_op(a, b, lambda x, y: operator.truediv(y, x)) - _check_op(a, b, lambda x, y: operator.floordiv(y, x)) - _check_op(a, b, lambda x, y: operator.mul(y, x)) + _check_op(a, b, ops.radd) + _check_op(a, b, ops.rsub) + _check_op(a, b, ops.rtruediv) + _check_op(a, b, ops.rfloordiv) + _check_op(a, b, ops.rmul) # FIXME: don't leave commented-out # NaN ** 0 = 1 in C? # _check_op(a, b, operator.pow) - # _check_op(a, b, lambda x, y: operator.pow(y, x)) + # _check_op(a, b, ops.rpow) check(self.bseries, self.bseries) check(self.iseries, self.iseries)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
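
For illustration only (not part of the PR): the test changes above replace hand-rolled reversed-operator lambdas with the named helpers in `pandas.core.ops`. A minimal sketch of the equivalence, with the helper semantics inferred from those call sites — each helper takes `(left, right)` and applies the operator with the operands swapped:

```python
import operator

from pandas.core import ops

# ops.radd(x, y) computes y + x, i.e. the reflected form that the
# removed `lambda x, y: operator.add(y, x)` spelled out by hand.
assert ops.radd(1, 2) == operator.add(2, 1) == 3
assert ops.rsub(1, 5) == 5 - 1 == 4
assert ops.rtruediv(2, 8) == 8 / 2 == 4.0
```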
https://api.github.com/repos/pandas-dev/pandas/pulls/27332
2019-07-11T03:25:46Z
2019-07-11T16:28:02Z
2019-07-11T16:28:02Z
2019-07-11T16:30:47Z
BUG: fix+test assigning invalid NAT-like to DTA/TDA/PA
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 540442b7eaed4..df17388856117 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -36,7 +36,7 @@ ) from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.inference import is_array_like -from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas._typing import DatetimeLikeScalar from pandas.core import missing, nanops @@ -492,7 +492,10 @@ def __setitem__( elif isinstance(value, self._scalar_type): self._check_compatible_with(value) value = self._unbox_scalar(value) - elif isna(value) or value == iNaT: + elif is_valid_nat_for_dtype(value, self.dtype): + value = iNaT + elif not isna(value) and lib.is_integer(value) and value == iNaT: + # exclude misc e.g. object() and any NAs not allowed above value = iNaT else: msg = ( diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index f540e9297738a..6a681954fd902 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -559,3 +559,27 @@ def remove_na_arraylike(arr): return arr[notna(arr)] else: return arr[notna(lib.values_from_object(arr))] + + +def is_valid_nat_for_dtype(obj, dtype): + """ + isna check that excludes incompatible dtypes + + Parameters + ---------- + obj : object + dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype + + Returns + ------- + bool + """ + if not isna(obj): + return False + if dtype.kind == "M": + return not isinstance(obj, np.timedelta64) + if dtype.kind == "m": + return not isinstance(obj, np.datetime64) + + # must be PeriodDType + return not isinstance(obj, (np.datetime64, np.timedelta64)) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 34fae1f4b1ab4..d9646feaf661e 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -651,3 +651,51 @@ def test_array_interface(self, period_index): result = np.asarray(arr, dtype="S20") expected = np.asarray(arr).astype("S20") tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "array,casting_nats", + [ + ( + pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (pd.NaT, np.timedelta64("NaT", "ns")), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (pd.NaT, np.datetime64("NaT", "ns")), + ), + (pd.period_range("2000-01-01", periods=3, freq="D")._data, (pd.NaT,)), + ], + ids=lambda x: type(x).__name__, +) +def test_casting_nat_setitem_array(array, casting_nats): + expected = type(array)._from_sequence([pd.NaT, array[1], array[2]]) + + for nat in casting_nats: + arr = array.copy() + arr[0] = nat + tm.assert_equal(arr, expected) + + +@pytest.mark.parametrize( + "array,non_casting_nats", + [ + ( + pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (np.datetime64("NaT", "ns"),), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (np.timedelta64("NaT", "ns"),), + ), + ( + pd.period_range("2000-01-01", periods=3, freq="D")._data, + (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns")), + ), + ], + ids=lambda x: type(x).__name__, +) +def test_invalid_nat_setitem_array(array, non_casting_nats): + for nat in non_casting_nats: + with pytest.raises(TypeError): + array[0] = nat
Broken off from #27311 for cleaner scope and to troubleshoot platform-specific failures. Related but orthogonal to #27323.
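
A quick illustration distilled from the new tests in the diff (it mirrors their use of the private `._data` accessor to get the backing `DatetimeArray`): dtype-compatible NaT-likes are accepted and stored as `iNaT`, while a NaT of the wrong kind now raises instead of being silently coerced.

```python
import numpy as np
import pandas as pd

arr = pd.date_range("2000-01-01", periods=3, freq="D")._data  # DatetimeArray

# pd.NaT and a datetime64 NaT are valid NaT-likes for a datetime64 array...
arr[0] = pd.NaT
arr[0] = np.datetime64("NaT", "ns")

# ...but a timedelta64 NaT has an incompatible kind and now raises.
try:
    arr[0] = np.timedelta64("NaT", "ns")
except TypeError:
    pass
```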
https://api.github.com/repos/pandas-dev/pandas/pulls/27331
2019-07-11T00:06:58Z
2019-07-11T20:15:04Z
2019-07-11T20:15:04Z
2019-07-11T20:25:31Z
TST: suppress rolling warnings correctly for raw=
diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index d85e22de1d176..2f3b83e172795 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -1,4 +1,5 @@ from collections import OrderedDict +import copy from datetime import datetime, timedelta import warnings from warnings import catch_warnings @@ -1536,21 +1537,20 @@ def test_rolling_apply(self, raw): # suppress warnings about empty slices, as we are deliberately testing # with a 0-length Series - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - message=".*(empty slice|0 for slice).*", - category=RuntimeWarning, - ) - - def f(x): + def f(x): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".*(empty slice|0 for slice).*", + category=RuntimeWarning, + ) return x[np.isfinite(x)].mean() - self._check_moment_func(np.mean, name="apply", func=f, raw=raw) + self._check_moment_func(np.mean, name="apply", func=f, raw=raw) - expected = Series([]) - result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw) - tm.assert_series_equal(result, expected) + expected = Series([]) + result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw) + tm.assert_series_equal(result, expected) # gh-8080 s = Series([None, None, None]) @@ -1676,6 +1676,12 @@ def _check_moment_func( zero_min_periods_equal=True, **kwargs ): + + # inject raw + if name == "apply": + kwargs = copy.copy(kwargs) + kwargs["raw"] = raw + def get_result(obj, window, min_periods=None, center=False): r = obj.rolling(window=window, min_periods=min_periods, center=center) return getattr(r, name)(**kwargs)
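
The pattern the diff settles on keeps the warning filter inside the applied function, so it is active exactly while each window is evaluated rather than around the whole test body. A minimal standalone sketch of that pattern (the sample series here is made up for illustration):

```python
import warnings

import numpy as np
import pandas as pd


def f(x):
    # Scope the filter to the window evaluation itself: an all-NaN
    # window makes `x[np.isfinite(x)]` empty, and numpy then emits a
    # "Mean of empty slice" RuntimeWarning while returning NaN.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=".*(empty slice|0 for slice).*",
            category=RuntimeWarning,
        )
        return x[np.isfinite(x)].mean()


result = pd.Series([np.nan, np.nan, 1.0]).rolling(2).apply(f, raw=True)
```

With `raw=True` each window arrives as a bare ndarray, which is why `_check_moment_func` in the diff injects `raw` into the kwargs it forwards to `.apply`.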
https://api.github.com/repos/pandas-dev/pandas/pulls/27330
2019-07-10T21:38:36Z
2019-07-11T00:18:34Z
2019-07-11T00:18:34Z
2019-07-11T00:58:18Z
TST/CLN: replace %s formatting syntax with .format in tests
diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py index be64b1f28c733..c08ad1da38671 100644 --- a/pandas/tests/arrays/categorical/test_dtypes.py +++ b/pandas/tests/arrays/categorical/test_dtypes.py @@ -92,20 +92,22 @@ def test_codes_dtypes(self): result = Categorical(["foo", "bar", "baz"]) assert result.codes.dtype == "int8" - result = Categorical(["foo%05d" % i for i in range(400)]) + result = Categorical(["foo{i:05d}".format(i=i) for i in range(400)]) assert result.codes.dtype == "int16" - result = Categorical(["foo%05d" % i for i in range(40000)]) + result = Categorical(["foo{i:05d}".format(i=i) for i in range(40000)]) assert result.codes.dtype == "int32" # adding cats result = Categorical(["foo", "bar", "baz"]) assert result.codes.dtype == "int8" - result = result.add_categories(["foo%05d" % i for i in range(400)]) + result = result.add_categories(["foo{i:05d}".format(i=i) for i in range(400)]) assert result.codes.dtype == "int16" # removing cats - result = result.remove_categories(["foo%05d" % i for i in range(300)]) + result = result.remove_categories( + ["foo{i:05d}".format(i=i) for i in range(300)] + ) assert result.codes.dtype == "int8" @pytest.mark.parametrize("ordered", [True, False]) diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py index 183eaada16452..a6836c58348b3 100644 --- a/pandas/tests/arrays/sparse/test_libsparse.py +++ b/pandas/tests/arrays/sparse/test_libsparse.py @@ -596,6 +596,6 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen): @pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"]) def test_op(self, opname): - sparse_op = getattr(splib, "sparse_%s_float64" % opname) + sparse_op = getattr(splib, "sparse_{opname}_float64".format(opname=opname)) python_op = getattr(operator, opname) self._op_tests(sparse_op, python_op) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 49d11f58ebe08..8c0930c044838 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -736,16 +736,16 @@ def test_float_truncation(self): df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) cutoff = 1000000000.0006 - result = df.query("A < %.4f" % cutoff) + result = df.query("A < {cutoff:.4f}".format(cutoff=cutoff)) assert result.empty cutoff = 1000000000.0010 - result = df.query("A > %.4f" % cutoff) + result = df.query("A > {cutoff:.4f}".format(cutoff=cutoff)) expected = df.loc[[1, 2], :] tm.assert_frame_equal(expected, result) exact = 1000000000.0011 - result = df.query("A == %.4f" % exact) + result = df.query("A == {exact:.4f}".format(exact=exact)) expected = df.loc[[1], :] tm.assert_frame_equal(expected, result) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 6824266c9282b..0b440e0186fbc 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1349,7 +1349,7 @@ def test_is_scalar_pandas_containers(self): def test_datetimeindex_from_empty_datetime64_array(): for unit in ["ms", "us", "ns"]: - idx = DatetimeIndex(np.array([], dtype="datetime64[%s]" % unit)) + idx = DatetimeIndex(np.array([], dtype="datetime64[{unit}]".format(unit=unit))) assert len(idx) == 0 diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 912e8b5fba233..c57b2a6964f39 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ 
b/pandas/tests/frame/test_alter_axes.py @@ -342,7 +342,7 @@ def __init__(self, name, color): self.color = color def __str__(self): - return "<Thing %r>" % (self.name,) + return "<Thing {self.name!r}>".format(self=self) # necessary for pretty KeyError __repr__ = __str__ @@ -419,7 +419,7 @@ def __init__(self, name, color): self.color = color def __str__(self): - return "<Thing %r>" % (self.name,) + return "<Thing {self.name!r}>".format(self=self) thing1 = Thing("One", "red") thing2 = Thing("Two", "blue") diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 76a210e129eb3..fe59f0574fb75 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -74,19 +74,19 @@ def test_get_value(self, float_frame): def test_add_prefix_suffix(self, float_frame): with_prefix = float_frame.add_prefix("foo#") - expected = pd.Index(["foo#%s" % c for c in float_frame.columns]) + expected = pd.Index(["foo#{c}".format(c=c) for c in float_frame.columns]) tm.assert_index_equal(with_prefix.columns, expected) with_suffix = float_frame.add_suffix("#foo") - expected = pd.Index(["%s#foo" % c for c in float_frame.columns]) + expected = pd.Index(["{c}#foo".format(c=c) for c in float_frame.columns]) tm.assert_index_equal(with_suffix.columns, expected) with_pct_prefix = float_frame.add_prefix("%") - expected = pd.Index(["%{}".format(c) for c in float_frame.columns]) + expected = pd.Index(["%{c}".format(c=c) for c in float_frame.columns]) tm.assert_index_equal(with_pct_prefix.columns, expected) with_pct_suffix = float_frame.add_suffix("%") - expected = pd.Index(["{}%".format(c) for c in float_frame.columns]) + expected = pd.Index(["{c}%".format(c=c) for c in float_frame.columns]) tm.assert_index_equal(with_pct_suffix.columns, expected) def test_get_axis(self, float_frame): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index eca827f82e296..a3817d3c226f5 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -264,7 +264,7 @@ def test_constructor_ordereddict(self): nitems = 100 nums = list(range(nitems)) random.shuffle(nums) - expected = ["A%d" % i for i in nums] + expected = ["A{i:d}".format(i=i) for i in nums] df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems))) assert expected == list(df.columns) diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 0781e20a71940..82c197ac054f0 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -453,7 +453,9 @@ def test_date_query_with_non_date(self): for op in ["<", ">", "<=", ">="]: with pytest.raises(TypeError): - df.query("dates %s nondate" % op, parser=parser, engine=engine) + df.query( + "dates {op} nondate".format(op=op), parser=parser, engine=engine + ) def test_query_syntax_error(self): engine, parser = self.engine, self.parser @@ -688,7 +690,7 @@ def test_inf(self): ops = "==", "!=" d = dict(zip(ops, (operator.eq, operator.ne))) for op, f in d.items(): - q = "a %s inf" % op + q = "a {op} inf".format(op=op) expected = df[f(df.a, np.inf)] result = df.query(q, engine=self.engine, parser=self.parser) assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index c33b758d2d62c..48f42b5f101ce 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -285,7 +285,7 @@ def test_info_shows_column_dtypes(self): df.info(buf=buf) res = buf.getvalue() 
for i, dtype in enumerate(dtypes): - name = "%d %d non-null %s" % (i, n, dtype) + name = "{i:d} {n:d} non-null {dtype}".format(i=i, n=n, dtype=dtype) assert name in res def test_info_max_cols(self): diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 1ca8333154c13..b8708e6ca1871 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -223,7 +223,7 @@ def test_frame_append_datetime64_col_other_units(self): ns_dtype = np.dtype("M8[ns]") for unit in units: - dtype = np.dtype("M8[%s]" % unit) + dtype = np.dtype("M8[{unit}]".format(unit=unit)) vals = np.arange(n, dtype=np.int64).view(dtype) df = DataFrame({"ints": np.arange(n)}, index=np.arange(n)) @@ -239,7 +239,7 @@ def test_frame_append_datetime64_col_other_units(self): df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype) for unit in units: - dtype = np.dtype("M8[%s]" % unit) + dtype = np.dtype("M8[{unit}]".format(unit=unit)) vals = np.arange(n, dtype=np.int64).view(dtype) tmp = df.copy() diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 33f29c6f8acb5..28051d9b7f3b9 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -718,7 +718,7 @@ def test_to_csv_withcommas(self): def test_to_csv_mixed(self): def create_cols(name): - return ["%s%03d" % (name, i) for i in range(5)] + return ["{name}{i:03d}".format(name=name, i=i) for i in range(5)] df_float = DataFrame( np.random.randn(100, 5), dtype="float64", columns=create_cols("float") diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 7905575a4a1a8..103ebf514b702 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -473,7 +473,8 @@ def test_agg_timezone_round_trip(): assert result3 == ts dates = [ - pd.Timestamp("2016-01-0%d 12:00:00" % i, tz="US/Pacific") for i in range(1, 5) + pd.Timestamp("2016-01-0{i:d} 12:00:00".format(i=i), tz="US/Pacific") + for i in range(1, 5) ] df = pd.DataFrame({"A": ["a", "b"] * 2, "B": dates}) grouped = df.groupby("A") diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 44a583bf661e8..76588549532b1 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -265,7 +265,7 @@ def desc3(group): result = group.describe() # names are different - result.index.name = "stat_%d" % len(group) + result.index.name = "stat_{:d}".format(len(group)) result = result[: len(group)] # weirdo diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 7c12b490f46d2..b240876de92b1 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -95,7 +95,7 @@ def _check(dtype): counts = np.zeros(len(out), dtype=np.int64) labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) - func = getattr(groupby, "group_ohlc_%s" % dtype) + func = getattr(groupby, "group_ohlc_{dtype}".format(dtype=dtype)) func(out, counts, obj[:, None], labels) def _ohlc(group): diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 5a864b3ab8cb4..7e5180a5c7b2b 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -197,8 +197,11 @@ def test_ngroup_respects_groupby_order(self): @pytest.mark.parametrize( "datetimelike", [ - [Timestamp("2016-05-%02d 20:09:25+00:00" % i) for i in range(1, 4)], 
- [Timestamp("2016-05-%02d 20:09:25" % i) for i in range(1, 4)], + [ + Timestamp("2016-05-{i:02d} 20:09:25+00:00".format(i=i)) + for i in range(1, 4) + ], + [Timestamp("2016-05-{i:02d} 20:09:25".format(i=i)) for i in range(1, 4)], [Timedelta(x, unit="h") for x in range(1, 4)], [Period(freq="2W", year=2017, month=x) for x in range(1, 4)], ], diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 1b3c4e65d252b..7523b250ea291 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -36,7 +36,7 @@ def test_str(self): # test the string repr idx = self.create_index() idx.name = "foo" - assert not "length=%s" % len(idx) in str(idx) + assert not "length={}".format(len(idx)) in str(idx) assert "'foo'" in str(idx) assert idx.__class__.__name__ in str(idx) @@ -44,7 +44,7 @@ def test_str(self): if idx.tz is not None: assert idx.tz in str(idx) if hasattr(idx, "freq"): - assert "freq='%s'" % idx.freqstr in str(idx) + assert "freq='{idx.freqstr}'".format(idx=idx) in str(idx) def test_view(self): i = self.create_index() diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index aeff489861f5d..bb3fe7a136204 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -90,7 +90,7 @@ def test_week_of_month_frequency(self): def test_hash_error(self): index = date_range("20010101", periods=10) with pytest.raises( - TypeError, match=("unhashable type: %r" % type(index).__name__) + TypeError, match=("unhashable type: {0.__name__!r}".format(type(index))) ): hash(index) diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index dba75b6247a20..472a404c2a8ef 100644 --- a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ -253,7 +253,9 @@ def test_rangeindex_fallback_coercion_bug(): def test_hash_error(indices): index = indices - with pytest.raises(TypeError, match=("unhashable type: %r" % type(index).__name__)): + with pytest.raises( + TypeError, match=("unhashable type: {0.__name__!r}".format(type(index))) + ): hash(indices) diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index eab55b91b3e60..8c75fbbae7de3 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -363,7 +363,7 @@ def test_constructor_year_and_quarter(self): year = pd.Series([2001, 2002, 2003]) quarter = year - 2000 idx = PeriodIndex(year=year, quarter=quarter) - strs = ["%dQ%d" % t for t in zip(quarter, year)] + strs = ["{t[0]:d}Q{t[1]:d}".format(t=t) for t in zip(quarter, year)] lops = list(map(Period, strs)) p = PeriodIndex(lops) tm.assert_index_equal(p, idx) diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 1db2c5c3a8dac..a9c0ecd1a3041 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -161,7 +161,7 @@ def test_dti_to_period(self): @pytest.mark.parametrize("month", MONTHS) def test_to_period_quarterly(self, month): # make sure we can make the round trip - freq = "Q-%s" % month + freq = "Q-{month}".format(month=month) rng = period_range("1989Q3", "1991Q3", freq=freq) stamps = rng.to_timestamp() result = stamps.to_period(freq) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 
b9bdaf40f8589..0400b7810ecc9 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -163,7 +163,7 @@ def test_dtype_str(self, indices): def test_hash_error(self, indices): index = indices with pytest.raises( - TypeError, match=("unhashable type: %r" % type(index).__name__) + TypeError, match=("unhashable type: {0.__name__!r}".format(type(index))) ): hash(indices) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 018ccfb2439dc..e790a913fcac2 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -239,7 +239,7 @@ def test_pickle(self): def test_hash_error(self): index = timedelta_range("1 days", periods=10) with pytest.raises( - TypeError, match=("unhashable type: %r" % type(index).__name__) + TypeError, match=("unhashable type: {0.__name__!r}".format(type(index))) ): hash(index) diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 9f1ab82ec904b..9ceeb06b6fd86 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -16,7 +16,7 @@ def _mklbl(prefix, n): - return ["%s%s" % (prefix, i) for i in range(n)] + return ["{prefix}{i}".format(prefix=prefix, i=i) for i in range(n)] def _axify(obj, key, axis): @@ -105,7 +105,7 @@ def setup_method(self, method): d = dict() for t in self._typs: - d[t] = getattr(self, "%s_%s" % (o, t), None) + d[t] = getattr(self, "{o}_{t}".format(o=o, t=t), None) setattr(self, o, d) @@ -247,7 +247,7 @@ def _print(result, error=None): # if we are in fails, the ok, otherwise raise it if fails is not None: if isinstance(detail, fails): - result = "ok (%s)" % type(detail).__name__ + result = "ok ({0.__name__})".format(type(detail)) _print(result) return diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 60a6a509c0912..85eab91af3c48 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -729,7 +729,9 @@ def test_iloc_mask(self): r = expected.get(key) if r != ans: raise AssertionError( - "[%s] does not match [%s], received [%s]" % (key, ans, r) + "[{key}] does not match [{ans}], received [{r}]".format( + key=key, ans=ans, r=r + ) ) def test_iloc_non_unique_indexing(self): diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py index ee62c91ad9698..45ccd8d1b8fb3 100644 --- a/pandas/tests/indexing/test_ix.py +++ b/pandas/tests/indexing/test_ix.py @@ -292,8 +292,8 @@ def test_ix_slicing_strings(self): def test_ix_setitem_out_of_bounds_axis_0(self): df = DataFrame( np.random.randn(2, 5), - index=["row%s" % i for i in range(2)], - columns=["col%s" % i for i in range(5)], + index=["row{i}".format(i=i) for i in range(2)], + columns=["col{i}".format(i=i) for i in range(5)], ) with catch_warnings(record=True): msg = "cannot set by positional indexing with enlargement" @@ -303,8 +303,8 @@ def test_ix_setitem_out_of_bounds_axis_0(self): def test_ix_setitem_out_of_bounds_axis_1(self): df = DataFrame( np.random.randn(5, 2), - index=["row%s" % i for i in range(5)], - columns=["col%s" % i for i in range(2)], + index=["row{i}".format(i=i) for i in range(5)], + columns=["col{i}".format(i=i) for i in range(2)], ) with catch_warnings(record=True): msg = "cannot set by positional indexing with enlargement" diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 2d4fb87d0c6bf..655e484bc34d1 100644 --- 
a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -110,7 +110,9 @@ def create_block(typestr, placement, item_shape=None, num_offset=0): elif typestr in ("complex", "c16", "c8"): values = 1.0j * (mat.astype(typestr) + num_offset) elif typestr in ("object", "string", "O"): - values = np.reshape(["A%d" % i for i in mat.ravel() + num_offset], shape) + values = np.reshape( + ["A{i:d}".format(i=i) for i in mat.ravel() + num_offset], shape + ) elif typestr in ("b", "bool"): values = np.ones(shape, dtype=np.bool_) elif typestr in ("datetime", "dt", "M8[ns]"): diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index cd8848828f6c4..a39cface0e015 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -537,7 +537,7 @@ def test_read_from_file_url(self, read_ext, datapath): # fails on some systems import platform - pytest.skip("failing on %s" % " ".join(platform.uname()).strip()) + pytest.skip("failing on {}".format(" ".join(platform.uname()).strip())) tm.assert_frame_equal(url_table, local_table) diff --git a/pandas/tests/io/excel/test_style.py b/pandas/tests/io/excel/test_style.py index 7ee84077a5334..8862f85ae9ab4 100644 --- a/pandas/tests/io/excel/test_style.py +++ b/pandas/tests/io/excel/test_style.py @@ -108,7 +108,7 @@ def custom_converter(css): for col1, col2 in zip(wb["frame"].columns, wb["styled"].columns): assert len(col1) == len(col2) for cell1, cell2 in zip(col1, col2): - ref = "%s%d" % (cell2.column, cell2.row) + ref = "{cell2.column}{cell2.row:d}".format(cell2=cell2) # XXX: this isn't as strong a test as ideal; we should # confirm that differences are exclusive if ref == "B2": @@ -156,7 +156,7 @@ def custom_converter(css): for col1, col2 in zip(wb["frame"].columns, wb["custom"].columns): assert len(col1) == len(col2) for cell1, cell2 in zip(col1, col2): - ref = "%s%d" % (cell2.column, cell2.row) + ref = "{cell2.column}{cell2.row:d}".format(cell2=cell2) if ref in ("B2", "C3", "D4", "B5", "C6", "D7", "B8", "B9"): assert not cell1.font.bold assert cell2.font.bold diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index f2fb54796f177..61c163d2cdaac 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -362,7 +362,7 @@ def color_negative_red(val): strings, black otherwise. 
""" color = "red" if val < 0 else "black" - return "color: %s" % color + return "color: {color}".format(color=color) dic = { ("a", "d"): [-1.12, 2.11], diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index a8a6a96f60d60..924b2a19e8504 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -610,7 +610,9 @@ def test_to_latex_multiindex_names(self, name0, name1, axes): idx_names = tuple(n or "{}" for n in names) idx_names_row = ( - "%s & %s & & & & \\\\\n" % idx_names + "{idx_names[0]} & {idx_names[1]} & & & & \\\\\n".format( + idx_names=idx_names + ) if (0 in axes and any(names)) else "" ) diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index 3ccb29f07dc83..2d2938697bd80 100755 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -352,7 +352,7 @@ def write_legacy_pickles(output_dir): pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL) fh.close() - print("created pickle file: %s" % pth) + print("created pickle file: {pth}".format(pth=pth)) def write_legacy_msgpack(output_dir, compress): @@ -369,7 +369,7 @@ def write_legacy_msgpack(output_dir, compress): pth = "{0}.msgpack".format(platform_name()) to_msgpack(os.path.join(output_dir, pth), create_msgpack_data(), compress=compress) - print("created msgpack file: %s" % pth) + print("created msgpack file: {pth}".format(pth=pth)) def write_legacy_file(): diff --git a/pandas/tests/io/msgpack/test_case.py b/pandas/tests/io/msgpack/test_case.py index 15b7090c11bad..a868da69d5459 100644 --- a/pandas/tests/io/msgpack/test_case.py +++ b/pandas/tests/io/msgpack/test_case.py @@ -5,7 +5,11 @@ def check(length, obj): v = packb(obj) - assert len(v) == length, "%r length should be %r but get %r" % (obj, length, len(v)) + assert ( + len(v) == length + ), "{obj!r} length should be {length!r} but get {got:!r}".format( + obj=obj, length=length, got=len(v) + ) assert unpackb(v, use_list=0) == obj diff --git a/pandas/tests/io/msgpack/test_extension.py b/pandas/tests/io/msgpack/test_extension.py index 12f27459f5afe..85ed43fa01079 100644 --- a/pandas/tests/io/msgpack/test_extension.py +++ b/pandas/tests/io/msgpack/test_extension.py @@ -48,7 +48,7 @@ def default(obj): typecode = 123 # application specific typecode data = tobytes(obj) return ExtType(typecode, data) - raise TypeError("Unknown type object %r" % (obj,)) + raise TypeError("Unknown type object {obj!r}".format(obj)) def ext_hook(code, data): print("ext_hook called", code, data) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index d469d3c2e51de..b94d5cd497ccf 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -979,7 +979,7 @@ def test_nonexistent_path(all_parsers): # gh-2428: pls no segfault # gh-14086: raise more helpful FileNotFoundError parser = all_parsers - path = "%s.csv" % tm.rands(10) + path = "{}.csv".format(tm.rands(10)) msg = "does not exist" if parser.engine == "c" else r"\[Errno 2\]" with pytest.raises(FileNotFoundError, match=msg) as e: @@ -1078,7 +1078,7 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding): 4,5,6""".replace( ",", sep ) - path = "__%s__.csv" % tm.rands(10) + path = "__{}__.csv".format(tm.rands(10)) kwargs = dict(sep=sep, skiprows=2) utf8 = "utf-8" @@ -1982,7 +1982,7 @@ def test_internal_eof_byte_to_file(all_parsers): parser = all_parsers data = 
b'c1,c2\r\n"test \x1a test", test\r\n' expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"]) - path = "__%s__.csv" % tm.rands(10) + path = "__{}__.csv".format(tm.rands(10)) with tm.ensure_clean(path) as path: with open(path, "wb") as f: diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py index 392628ee74ba2..c94adf9da0bf3 100644 --- a/pandas/tests/io/parser/test_multi_thread.py +++ b/pandas/tests/io/parser/test_multi_thread.py @@ -41,7 +41,9 @@ def test_multi_thread_string_io_read_csv(all_parsers): num_files = 100 bytes_to_df = [ - "\n".join(["%d,%d,%d" % (i, i, i) for i in range(max_row_range)]).encode() + "\n".join( + ["{i:d},{i:d},{i:d}".format(i=i) for i in range(max_row_range)] + ).encode() for _ in range(num_files) ] files = [BytesIO(b) for b in bytes_to_df] diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 5d79f6e281ef1..36391e19a102e 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1101,7 +1101,7 @@ def test_bad_date_parse(all_parsers, cache_dates, value): # if we have an invalid date make sure that we handle this with # and w/o the cache properly parser = all_parsers - s = StringIO(("%s,\n" % value) * 50000) + s = StringIO(("{value},\n".format(value=value)) * 50000) parser.read_csv( s, diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index 72885315e06bc..9ddaccc4d38b7 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -260,7 +260,7 @@ def test_fwf_regression(): # Turns out "T060" is parsable as a datetime slice! tz_list = [1, 10, 20, 30, 60, 80, 100] widths = [16] + [8] * len(tz_list) - names = ["SST"] + ["T%03d" % z for z in tz_list[1:]] + names = ["SST"] + ["T{z:03d}".format(z=z) for z in tz_list[1:]] data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192 2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869 diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 8bdf53c3caf61..b23ddf5bd9292 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -95,10 +95,10 @@ def test_python_engine(self, python_engine): 1,2,3,4,""" for default in py_unsupported: - msg = "The %r option is not supported with the %r engine" % ( - default, - python_engine, - ) + msg = ( + "The {default!r} option is not supported with the {python_engine!r}" + " engine" + ).format(default=default, python_engine=python_engine) kwargs = {default: object()} with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py index fb87749ea62e0..946334b5df05e 100644 --- a/pandas/tests/io/pytables/test_pytables.py +++ b/pandas/tests/io/pytables/test_pytables.py @@ -168,7 +168,7 @@ def teardown_class(cls): tm.set_testing_mode() def setup_method(self, method): - self.path = "tmp.__%s__.h5" % tm.rands(10) + self.path = "tmp.__{}__.h5".format(tm.rands(10)) def teardown_method(self, method): pass @@ -736,7 +736,7 @@ def test_getattr(self): # not stores for x in ["mode", "path", "handle", "complib"]: - getattr(store, "_%s" % x) + getattr(store, "_{x}".format(x=x)) def test_put(self): @@ -773,7 +773,9 @@ def test_put_string_index(self): with ensure_clean_store(self.path) as store: - index = Index(["I am a very long string index: %s" % i for i in 
range(20)]) + index = Index( + ["I am a very long string index: {i}".format(i=i) for i in range(20)] + ) s = Series(np.arange(20), index=index) df = DataFrame({"A": s, "B": s}) @@ -786,7 +788,7 @@ def test_put_string_index(self): # mixed length index = Index( ["abcdefghijklmnopqrstuvwxyz1234567890"] - + ["I am a very long string index: %s" % i for i in range(20)] + + ["I am a very long string index: {i}".format(i=i) for i in range(20)] ) s = Series(np.arange(21), index=index) df = DataFrame({"A": s, "B": s}) @@ -2109,7 +2111,7 @@ def test_unimplemented_dtypes_table_columns(self): df = tm.makeDataFrame() df[n] = f with pytest.raises(TypeError): - store.append("df1_%s" % n, df) + store.append("df1_{n}".format(n=n), df) # frame df = tm.makeDataFrame() @@ -2802,14 +2804,14 @@ def test_select_dtypes(self): expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa for v in [True, "true", 1]: result = store.select( - "df", "boolv == %s" % str(v), columns=["A", "boolv"] + "df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"] ) tm.assert_frame_equal(expected, result) expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa for v in [False, "false", 0]: result = store.select( - "df", "boolv == %s" % str(v), columns=["A", "boolv"] + "df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"] ) tm.assert_frame_equal(expected, result) @@ -2896,7 +2898,7 @@ def test_select_with_many_inputs(self): users=["a"] * 50 + ["b"] * 50 + ["c"] * 100 - + ["a%03d" % i for i in range(100)], + + ["a{i:03d}".format(i=i) for i in range(100)], ) ) _maybe_remove(store, "df") @@ -2917,7 +2919,7 @@ def test_select_with_many_inputs(self): tm.assert_frame_equal(expected, result) # big selector along the columns - selector = ["a", "b", "c"] + ["a%03d" % i for i in range(60)] + selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)] result = store.select( "df", "ts>=Timestamp('2012-02-01') and users=selector" ) @@ -2990,7 +2992,7 @@ def test_select_iterator(self): df1 = tm.makeTimeDataFrame(500) store.append("df1", df1, data_columns=True) - df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x) + df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format) df2["foo"] = "bar" store.append("df2", df2) @@ -3029,19 +3031,21 @@ def test_select_iterator_complete_8014(self): # select w/o iterator and where clause, single term, begin # of range, works - where = "index >= '%s'" % beg_dt + where = "index >= '{beg_dt}'".format(beg_dt=beg_dt) result = store.select("df", where=where) tm.assert_frame_equal(expected, result) # select w/o iterator and where clause, single term, end # of range, works - where = "index <= '%s'" % end_dt + where = "index <= '{end_dt}'".format(end_dt=end_dt) result = store.select("df", where=where) tm.assert_frame_equal(expected, result) # select w/o iterator and where clause, inclusive range, # works - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + where = "index >= '{beg_dt}' & index <= '{end_dt}'".format( + beg_dt=beg_dt, end_dt=end_dt + ) result = store.select("df", where=where) tm.assert_frame_equal(expected, result) @@ -3061,19 +3065,21 @@ def test_select_iterator_complete_8014(self): tm.assert_frame_equal(expected, result) # select w/iterator and where clause, single term, begin of range - where = "index >= '%s'" % beg_dt + where = "index >= '{beg_dt}'".format(beg_dt=beg_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) tm.assert_frame_equal(expected, result) # select 
w/iterator and where clause, single term, end of range - where = "index <= '%s'" % end_dt + where = "index <= '{end_dt}'".format(end_dt=end_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) tm.assert_frame_equal(expected, result) # select w/iterator and where clause, inclusive range - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + where = "index >= '{beg_dt}' & index <= '{end_dt}'".format( + beg_dt=beg_dt, end_dt=end_dt + ) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) tm.assert_frame_equal(expected, result) @@ -3095,21 +3101,23 @@ def test_select_iterator_non_complete_8014(self): end_dt = expected.index[-2] # select w/iterator and where clause, single term, begin of range - where = "index >= '%s'" % beg_dt + where = "index >= '{beg_dt}'".format(beg_dt=beg_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) rexpected = expected[expected.index >= beg_dt] tm.assert_frame_equal(rexpected, result) # select w/iterator and where clause, single term, end of range - where = "index <= '%s'" % end_dt + where = "index <= '{end_dt}'".format(end_dt=end_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) rexpected = expected[expected.index <= end_dt] tm.assert_frame_equal(rexpected, result) # select w/iterator and where clause, inclusive range - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + where = "index >= '{beg_dt}' & index <= '{end_dt}'".format( + beg_dt=beg_dt, end_dt=end_dt + ) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) rexpected = expected[ @@ -3127,7 +3135,7 @@ def test_select_iterator_non_complete_8014(self): end_dt = expected.index[-1] # select w/iterator and where clause, single term, begin of range - where = "index > '%s'" % end_dt + where = "index > '{end_dt}'".format(end_dt=end_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] assert 0 == len(results) @@ -3149,14 +3157,14 @@ def test_select_iterator_many_empty_frames(self): end_dt = expected.index[chunksize - 1] # select w/iterator and where clause, single term, begin of range - where = "index >= '%s'" % beg_dt + where = "index >= '{beg_dt}'".format(beg_dt=beg_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] result = concat(results) rexpected = expected[expected.index >= beg_dt] tm.assert_frame_equal(rexpected, result) # select w/iterator and where clause, single term, end of range - where = "index <= '%s'" % end_dt + where = "index <= '{end_dt}'".format(end_dt=end_dt) results = [s for s in store.select("df", where=where, chunksize=chunksize)] assert len(results) == 1 @@ -3165,7 +3173,9 @@ def test_select_iterator_many_empty_frames(self): tm.assert_frame_equal(rexpected, result) # select w/iterator and where clause, inclusive range - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + where = "index >= '{beg_dt}' & index <= '{end_dt}'".format( + beg_dt=beg_dt, end_dt=end_dt + ) results = [s for s in store.select("df", where=where, chunksize=chunksize)] # should be 1, is 10 @@ -3183,7 +3193,9 @@ def test_select_iterator_many_empty_frames(self): # return [] e.g. `for e in []: print True` never prints # True. 
- where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt) + where = "index <= '{beg_dt}' & index >= '{end_dt}'".format( + beg_dt=beg_dt, end_dt=end_dt + ) results = [s for s in store.select("df", where=where, chunksize=chunksize)] # should be [] @@ -3608,7 +3620,7 @@ def test_coordinates(self): _maybe_remove(store, "df1") _maybe_remove(store, "df2") df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) store.append("df1", df1, data_columns=["A", "B"]) store.append("df2", df2) @@ -3680,7 +3692,7 @@ def test_coordinates(self): def test_append_to_multiple(self): df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) df2["foo"] = "bar" df = concat([df1, df2], axis=1) @@ -3710,7 +3722,7 @@ def test_append_to_multiple(self): def test_append_to_multiple_dropna(self): df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan df = concat([df1, df2], axis=1) @@ -3730,7 +3742,7 @@ def test_append_to_multiple_dropna(self): ) def test_append_to_multiple_dropna_false(self): df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan df = concat([df1, df2], axis=1) @@ -3749,7 +3761,7 @@ def test_append_to_multiple_dropna_false(self): def test_select_as_multiple(self): df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) df2["foo"] = "bar" with ensure_clean_store(self.path) as store: @@ -3920,8 +3932,8 @@ def test_start_stop_fixed(self): def test_select_filter_corner(self): df = DataFrame(np.random.randn(50, 100)) - df.index = ["%.3d" % c for c in df.index] - df.columns = ["%.3d" % c for c in df.columns] + df.index = ["{c:3d}".format(c=c) for c in df.index] + df.columns = ["{c:3d}".format(c=c) for c in df.columns] with ensure_clean_store(self.path) as store: store.put("frame", df, format="table") @@ -4355,7 +4367,7 @@ def test_append_with_diff_col_name_types_raises_value_error(self): df5 = DataFrame({("1", 2, object): np.random.randn(10)}) with ensure_clean_store(self.path) as store: - name = "df_%s" % tm.rands(10) + name = "df_{}".format(tm.rands(10)) store.append(name, df) for d in (df2, df3, df4, df5): @@ -4775,16 +4787,16 @@ def test_query_long_float_literal(self): store.append("test", df, format="table", data_columns=True) cutoff = 1000000000.0006 - result = store.select("test", "A < %.4f" % cutoff) + result = store.select("test", "A < {cutoff:.4f}".format(cutoff=cutoff)) assert result.empty cutoff = 1000000000.0010 - result = store.select("test", "A > %.4f" % cutoff) + result = store.select("test", "A > {cutoff:.4f}".format(cutoff=cutoff)) expected = df.loc[[1, 2], :] tm.assert_frame_equal(expected, result) exact = 1000000000.0011 - result = store.select("test", "A == %.4f" % exact) + result = store.select("test", "A == {exact:.4f}".format(exact=exact)) expected = df.loc[[1], :] tm.assert_frame_equal(expected, result) @@ -5084,7 +5096,9 @@ def _compare_with_tz(self, a, b): a_e = a.loc[i, c] b_e = b.loc[i, c] if not (a_e == b_e and a_e.tz == b_e.tz): - raise AssertionError("invalid 
tz comparison [%s] [%s]" % (a_e, b_e)) + raise AssertionError( + "invalid tz comparison [{a_e}] [{b_e}]".format(a_e=a_e, b_e=b_e) + ) def test_append_with_timezones_dateutil(self): diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 9752b4c62aff7..6d06113dfc9ec 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -902,8 +902,8 @@ def test_computer_sales_page(self, datapath): def test_wikipedia_states_table(self, datapath): data = datapath("io", "data", "wikipedia_states.html") - assert os.path.isfile(data), "%r is not a file" % data - assert os.path.getsize(data), "%r is an empty file" % data + assert os.path.isfile(data), "{data!r} is not a file".format(data=data) + assert os.path.getsize(data), "{data!r} is an empty file".format(data=data) result = self.read_html(data, "Arizona", header=1)[0] assert result["sq mi"].dtype == np.dtype("float64") diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index fb1f657905be7..33a11087f622d 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -101,7 +101,7 @@ def check_arbitrary(a, b): @pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning") class TestPackers: def setup_method(self, method): - self.path = "__%s__.msg" % tm.rands(10) + self.path = "__{}__.msg".format(tm.rands(10)) def teardown_method(self, method): pass diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 7aba2a3677f84..076d0c9f947c7 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -48,7 +48,7 @@ def compare_element(result, expected, typ, version=None): return if typ.startswith("sp_"): - comparator = getattr(tm, "assert_%s_equal" % typ) + comparator = getattr(tm, "assert_{typ}_equal".format(typ=typ)) comparator(result, expected, exact_indices=False) elif typ == "timestamp": if expected is pd.NaT: @@ -57,7 +57,9 @@ def compare_element(result, expected, typ, version=None): assert result == expected assert result.freq == expected.freq else: - comparator = getattr(tm, "assert_%s_equal" % typ, tm.assert_almost_equal) + comparator = getattr( + tm, "assert_{typ}_equal".format(typ=typ), tm.assert_almost_equal + ) comparator(result, expected) @@ -242,7 +244,7 @@ def test_pickle_path_localpath(): @pytest.fixture def get_random_path(): - return "__%s__.pickle" % tm.rands(10) + return "__{}__.pickle".format(tm.rands(10)) class TestCompression: diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 347e280234f91..d8465a427eaea 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -217,7 +217,9 @@ def teardown_method(self, method): class MySQLMixIn(MixInBase): def drop_table(self, table_name): cur = self.conn.cursor() - cur.execute("DROP TABLE IF EXISTS %s" % sql._get_valid_mysql_name(table_name)) + cur.execute( + "DROP TABLE IF EXISTS {}".format(sql._get_valid_mysql_name(table_name)) + ) self.conn.commit() def _get_all_tables(self): @@ -237,7 +239,7 @@ def _close_conn(self): class SQLiteMixIn(MixInBase): def drop_table(self, table_name): self.conn.execute( - "DROP TABLE IF EXISTS %s" % sql._get_valid_sqlite_name(table_name) + "DROP TABLE IF EXISTS {}".format(sql._get_valid_sqlite_name(table_name)) ) self.conn.commit() @@ -405,7 +407,11 @@ def _load_raw_sql(self): def _count_rows(self, table_name): result = ( self._get_exec() - .execute("SELECT count(*) AS count_1 FROM %s" % table_name) + .execute( + "SELECT count(*) AS count_1 FROM {table_name}".format( + 
table_name=table_name + ) + ) .fetchone() ) return result[0] @@ -1201,7 +1207,7 @@ def _get_sqlite_column_type(self, schema, column): for col in schema.split("\n"): if col.split()[0].strip('""') == column: return col.split()[1] - raise ValueError("Column %s not found" % (column)) + raise ValueError("Column {column} not found".format(column=column)) def test_sqlite_type_mapping(self): @@ -2193,12 +2199,14 @@ def test_datetime_time(self): def _get_index_columns(self, tbl_name): ixs = sql.read_sql_query( "SELECT * FROM sqlite_master WHERE type = 'index' " - + "AND tbl_name = '%s'" % tbl_name, + + "AND tbl_name = '{tbl_name}'".format(tbl_name=tbl_name), self.conn, ) ix_cols = [] for ix_name in ixs.name: - ix_info = sql.read_sql_query("PRAGMA index_info(%s)" % ix_name, self.conn) + ix_info = sql.read_sql_query( + "PRAGMA index_info({ix_name})".format(ix_name=ix_name), self.conn + ) ix_cols.append(ix_info.name.tolist()) return ix_cols @@ -2211,11 +2219,15 @@ def test_transactions(self): self._transaction_test() def _get_sqlite_column_type(self, table, column): - recs = self.conn.execute("PRAGMA table_info(%s)" % table) + recs = self.conn.execute("PRAGMA table_info({table})".format(table=table)) for cid, name, ctype, not_null, default, pk in recs: if name == column: return ctype - raise ValueError("Table %s, column %s not found" % (table, column)) + raise ValueError( + "Table {table}, column {column} not found".format( + table=table, column=column + ) + ) def test_dtype(self): if self.flavor == "mysql": @@ -2285,7 +2297,7 @@ def test_illegal_names(self): sql.table_exists(weird_name, self.conn) df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name]) - c_tbl = "test_weird_col_name%d" % ndx + c_tbl = "test_weird_col_name{ndx:d}".format(ndx=ndx) df2.to_sql(c_tbl, self.conn) sql.table_exists(c_tbl, self.conn) @@ -2300,15 +2312,15 @@ def date_format(dt): _formatters = { - datetime: lambda dt: "'%s'" % date_format(dt), - str: lambda x: "'%s'" % x, - np.str_: lambda x: "'%s'" % x, - bytes: lambda x: "'%s'" % x, - float: lambda x: "%.8f" % x, - int: lambda x: "%s" % x, + datetime: "'{}'".format, + str: "'{}'".format, + np.str_: "'{}'".format, + bytes: "'{}'".format, + float: "{:.8f}".format, + int: "{:d}".format, type(None): lambda x: "NULL", - np.float64: lambda x: "%.10f" % x, - bool: lambda x: "'%s'" % x, + np.float64: "{:.10f}".format, + bool: "'{!s}'".format, } @@ -2490,7 +2502,7 @@ def test_if_exists(self): df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) table_name = "table_if_exists" - sql_select = "SELECT * FROM %s" % table_name + sql_select = "SELECT * FROM {table_name}".format(table_name=table_name) def clean_up(test_table_to_drop): """ @@ -2778,7 +2790,7 @@ def test_if_exists(self): df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) table_name = "table_if_exists" - sql_select = "SELECT * FROM %s" % table_name + sql_select = "SELECT * FROM {table_name}".format(table_name=table_name) def clean_up(test_table_to_drop): """ diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 4c5b1e66d0075..8b4a78e9195b5 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -856,10 +856,10 @@ def test_time_series_plot_color_with_empty_kwargs(self): def test_xticklabels(self): # GH11529 - s = Series(np.arange(10), index=["P%02d" % i for i in range(10)]) + 
s = Series(np.arange(10), index=["P{i:02d}".format(i=i) for i in range(10)]) _, ax = self.plt.subplots() ax = s.plot(xticks=[0, 3, 5, 9], ax=ax) - exp = ["P%02d" % i for i in [0, 3, 5, 9]] + exp = ["P{i:02d}".format(i=i) for i in [0, 3, 5, 9]] self._check_text_labels(ax.get_xticklabels(), exp) def test_custom_business_day_freq(self): diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index 2ced955652c21..30febe3d2cc83 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -101,7 +101,9 @@ def test_selection(self, index, freq, kind, kwargs): def test_annual_upsample_cases( self, targ, conv, meth, month, simple_period_range_series ): - ts = simple_period_range_series("1/1/1990", "12/31/1991", freq="A-%s" % month) + ts = simple_period_range_series( + "1/1/1990", "12/31/1991", freq="A-{month}".format(month=month) + ) result = getattr(ts.resample(targ, convention=conv), meth)() expected = result.to_timestamp(targ, how=conv) @@ -370,14 +372,16 @@ def test_resample_to_timestamps(self, simple_period_range_series): def test_resample_to_quarterly(self, simple_period_range_series): for month in MONTHS: - ts = simple_period_range_series("1990", "1992", freq="A-%s" % month) - quar_ts = ts.resample("Q-%s" % month).ffill() + ts = simple_period_range_series( + "1990", "1992", freq="A-{month}".format(month=month) + ) + quar_ts = ts.resample("Q-{month}".format(month=month)).ffill() stamps = ts.to_timestamp("D", how="start") qdates = period_range( ts.index[0].asfreq("D", "start"), ts.index[-1].asfreq("D", "end"), - freq="Q-%s" % month, + freq="Q-{month}".format(month=month), ) expected = stamps.reindex(qdates.to_timestamp("D", "s"), method="ffill") diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 16cfe3a469b34..305d7b9781634 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -790,7 +790,9 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix except KeyError: if how in ("left", "inner"): raise AssertionError( - "key %s should not have been in the join" % str(group_key) + "key {group_key!s} should not have been in the join".format( + group_key=group_key + ) ) _assert_all_na(l_joined, left.columns, join_col) @@ -802,7 +804,9 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix except KeyError: if how in ("right", "inner"): raise AssertionError( - "key %s should not have been in the join" % str(group_key) + "key {group_key!s} should not have been in the join".format( + group_key=group_key + ) ) _assert_all_na(r_joined, right.columns, join_col) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 1b067c08d2e40..5b1f151daf219 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -339,8 +339,8 @@ def test_pairs(self): df = DataFrame(data) spec = { - "visitdt": ["visitdt%d" % i for i in range(1, 4)], - "wt": ["wt%d" % i for i in range(1, 4)], + "visitdt": ["visitdt{i:d}".format(i=i) for i in range(1, 4)], + "wt": ["wt{i:d}".format(i=i) for i in range(1, 4)], } result = lreshape(df, spec) @@ -529,8 +529,8 @@ def test_pairs(self): tm.assert_frame_equal(result, exp) spec = { - "visitdt": ["visitdt%d" % i for i in range(1, 3)], - "wt": ["wt%d" % i for i in range(1, 4)], + "visitdt": ["visitdt{i:d}".format(i=i) for i in range(1, 3)], + "wt": ["wt{i:d}".format(i=i) for i in range(1, 4)], } 
msg = "All column lists must be same length" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index 1c9e3e57bc310..149930059d868 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -166,7 +166,8 @@ def test_unicode(self, sparse): s = [e, eacute, eacute] res = get_dummies(s, prefix="letter", sparse=sparse) exp = DataFrame( - {"letter_e": [1, 0, 0], "letter_%s" % eacute: [0, 1, 1]}, dtype=np.uint8 + {"letter_e": [1, 0, 0], "letter_{eacute}".format(eacute=eacute): [0, 1, 1]}, + dtype=np.uint8, ) if sparse: exp = exp.apply(pd.SparseArray, fill_value=0) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 771a67dfceaa8..4404b93e86218 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -307,7 +307,7 @@ def test_multiples(self): @pytest.mark.parametrize("month", MONTHS) def test_period_cons_quarterly(self, month): # bugs in scikits.timeseries - freq = "Q-%s" % month + freq = "Q-{month}".format(month=month) exp = Period("1989Q3", freq=freq) assert "1989Q3" in str(exp) stamp = exp.to_timestamp("D", how="end") @@ -321,7 +321,7 @@ def test_period_cons_quarterly(self, month): @pytest.mark.parametrize("month", MONTHS) def test_period_cons_annual(self, month): # bugs in scikits.timeseries - freq = "A-%s" % month + freq = "A-{month}".format(month=month) exp = Period("1989", freq=freq) stamp = exp.to_timestamp("D", how="end") + timedelta(days=30) p = Period(stamp, freq=freq) @@ -332,8 +332,8 @@ def test_period_cons_annual(self, month): @pytest.mark.parametrize("day", DAYS) @pytest.mark.parametrize("num", range(10, 17)) def test_period_cons_weekly(self, num, day): - daystr = "2011-02-%d" % num - freq = "W-%s" % day + daystr = "2011-02-{num}".format(num=num) + freq = "W-{day}".format(day=day) result = Period(daystr, freq=freq) expected = Period(daystr, freq="D").asfreq(freq) diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 7b0ff83aee5d4..401fc285424fe 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -576,7 +576,7 @@ def test_bounds_with_different_units(self): for date_string in out_of_bounds_dates: for unit in time_units: - dt64 = np.datetime64(date_string, dtype="M8[%s]" % unit) + dt64 = np.datetime64(date_string, dtype="M8[{unit}]".format(unit=unit)) with pytest.raises(ValueError): Timestamp(dt64) @@ -584,7 +584,7 @@ def test_bounds_with_different_units(self): for date_string in in_bounds_dates: for unit in time_units: - dt64 = np.datetime64(date_string, dtype="M8[%s]" % unit) + dt64 = np.datetime64(date_string, dtype="M8[{unit}]".format(unit=unit)) Timestamp(dt64) def test_min_valid(self): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 67373686d6728..32d32a5d14fb2 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -99,7 +99,7 @@ def test_argsort(self, datetime_series): assert issubclass(argsorted.dtype.type, np.integer) # GH 2967 (introduced bug in 0.11-dev I think) - s = Series([Timestamp("201301%02d" % (i + 1)) for i in range(5)]) + s = Series([Timestamp("201301{i:02d}".format(i=i)) for i in range(1, 6)]) assert s.dtype == "datetime64[ns]" shifted = s.shift(-1) assert shifted.dtype == "datetime64[ns]" diff --git 
a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 2097264ba5e78..2870677e42d50 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -155,7 +155,9 @@ def test_constructor_subclass_dict(self): def test_constructor_ordereddict(self): # GH3283 - data = OrderedDict(("col%s" % i, np.random.random()) for i in range(12)) + data = OrderedDict( + ("col{i}".format(i=i), np.random.random()) for i in range(12) + ) series = self.series_klass(data) expected = self.series_klass(list(data.values()), list(data.keys())) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 6527d41eac841..55a37da6b663f 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -967,7 +967,7 @@ def test_rename(self, float_frame): ) tm.assert_sp_frame_equal(result, expected) - result = float_frame.rename(columns=lambda x: "%s%d" % (x, 1)) + result = float_frame.rename(columns="{}1".format) data = { "A1": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], "B1": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index eb217283c7a83..ad4c898b004ac 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -619,7 +619,9 @@ def _check_inplace_op(iop, op): inplace_ops = ["add", "sub", "mul", "truediv", "floordiv", "pow"] for op in inplace_ops: - _check_inplace_op(getattr(operator, "i%s" % op), getattr(operator, op)) + _check_inplace_op( + getattr(operator, "i{op}".format(op=op)), getattr(operator, op) + ) @pytest.mark.parametrize( "values, op, fill_value", diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index a7281e002cc5c..4070624985068 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -81,7 +81,7 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=Tr assert expected.dtype.kind == "f" assert_func(expected, result) except Exception: - pprint_thing("Failed test with operator %r" % op.__name__) + pprint_thing("Failed test with operator {op.__name__!r}".format(op=op)) raise def test_integer_arithmetic(self): @@ -129,8 +129,8 @@ def run_binary( assert not used_numexpr, "Used numexpr unexpectedly." 
assert_func(expected, result) except Exception: - pprint_thing("Failed test with operation %r" % arith) - pprint_thing("test_flex was %r" % test_flex) + pprint_thing("Failed test with operation {arith!r}".format(arith=arith)) + pprint_thing("test_flex was {test_flex!r}".format(test_flex=test_flex)) raise def run_frame(self, df, other, binary_comp=None, run_binary=True, **kwargs): diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index f6e936630f6be..21ab28c94c978 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -179,9 +179,9 @@ def check_fun_data( self.check_results(targ, res, axis, check_dtype=check_dtype) except BaseException as exc: exc.args += ( - "axis: %s of %s" % (axis, testarval.ndim - 1), - "skipna: %s" % skipna, - "kwargs: %s" % kwargs, + "axis: {axis} of {of}".format(axis=axis, of=testarval.ndim - 1), + "skipna: {skipna}".format(skipna=skipna), + "kwargs: {kwargs}".format(kwargs=kwargs), ) raise @@ -234,9 +234,9 @@ def check_fun( ) except BaseException as exc: exc.args += ( - "testar: %s" % testar, - "targar: %s" % targar, - "targarnan: %s" % targarnan, + "testar: {testar}".format(testar=testar), + "targar: {targar}".format(targar=targar), + "targarnan: {targarnan}".format(targarnan=targarnan), ) raise @@ -712,7 +712,7 @@ def check_nancomp(self, checkfun, targ0): res2 = checkfun(arr_float_nan, arr_nan_float1) tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) except Exception as exc: - exc.args += ("ndim: %s" % arr_float.ndim,) + exc.args += ("ndim: {arr_float.ndim}".format(arr_float=arr_float),) raise try: @@ -760,7 +760,7 @@ def check_bool(self, func, value, correct, *args, **kwargs): else: assert not res0 except BaseException as exc: - exc.args += ("dim: %s" % getattr(value, "ndim", value),) + exc.args += ("dim: {}".format(getattr(value, "ndim", value)),) raise if not hasattr(value, "ndim"): break diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 2df5460a05953..fca88ff3ce8ce 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -2039,7 +2039,7 @@ def get_result(obj, window, min_periods=None, center=False): tm.assert_series_equal(result, expected) # shifter index - s = ["x%d" % x for x in range(12)] + s = ["x{x:d}".format(x=x) for x in range(12)] if has_min_periods: minp = 10 diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index 4c8f6253cdf7b..50844aabb2c88 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -178,7 +178,7 @@ def test_infer_freq_delta(base_delta_code_pair, count): inc = base_delta * count index = DatetimeIndex([b + inc * j for j in range(3)]) - exp_freq = "%d%s" % (count, code) if count > 1 else code + exp_freq = "{count:d}{code}".format(count=count, code=code) if count > 1 else code assert frequencies.infer_freq(index) == exp_freq diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py index 079fcc36ff3ee..fbf4454109ec0 100644 --- a/pandas/tests/tseries/offsets/common.py +++ b/pandas/tests/tseries/offsets/common.py @@ -13,14 +13,18 @@ def assert_offset_equal(offset, base, expected): assert actual_apply == expected except AssertionError: raise AssertionError( - "\nExpected: %s\nActual: %s\nFor Offset: %s)" - "\nAt Date: %s" % (expected, actual, offset, base) + "\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})" + "\nAt Date: {base}".format( + expected=expected, 
actual=actual, offset=offset, base=base + ) ) def assert_onOffset(offset, date, expected): actual = offset.onOffset(date) assert actual == expected, ( - "\nExpected: %s\nActual: %s\nFor Offset: %s)" - "\nAt Date: %s" % (expected, actual, offset, date) + "\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})" + "\nAt Date: {date}".format( + expected=expected, actual=actual, offset=offset, date=date + ) ) diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index c24d917a5e454..8b1aaafb94e0b 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -79,10 +79,10 @@ def test_get_offset(): for name, expected in pairs: offset = get_offset(name) - assert offset == expected, "Expected %r to yield %r (actual: %r)" % ( - name, - expected, - offset, + assert ( + offset == expected + ), "Expected {name!r} to yield {expected!r} (actual: {offset!r})".format( + name=name, expected=expected, offset=offset ) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 2654d83ee0c52..1abc8aece5ec9 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3969,10 +3969,10 @@ def test_get_offset(): for name, expected in pairs: offset = get_offset(name) - assert offset == expected, "Expected %r to yield %r (actual: %r)" % ( - name, - expected, - offset, + assert ( + offset == expected + ), "Expected {name!r} to yield {expected!r} (actual: {offset!r})".format( + name=name, expected=expected, offset=offset )
xref #16130
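For reference, the mechanical patterns this diff applies when converting %-formatting to `str.format` (values below are illustrative, not taken from the diff):

```python
# "%s_2" % x  ->  "{}_2".format(x); when a lambda only formats,
# the bound method can replace it outright:
rename = "{}_2".format          # was: lambda x: "%s_2" % x
assert rename("A") == "A_2"

# format specs carry over: "%.4f" % cutoff -> "{cutoff:.4f}".format(...)
assert "A < {cutoff:.4f}".format(cutoff=1000000000.0006) == "A < 1000000000.0006"

# one subtlety worth checking in such conversions: "%.3d" zero-pads
# to three digits, while "{:3d}" space-pads to width three; the
# zero-padded format spec is "{:03d}"
assert "%.3d" % 7 == "007"
assert "{:3d}".format(7) == "  7"
assert "{:03d}".format(7) == "007"
```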
https://api.github.com/repos/pandas-dev/pandas/pulls/27324
2019-07-10T15:35:07Z
2019-07-10T16:17:55Z
2019-07-10T16:17:55Z
2019-07-10T16:56:25Z
BUG: Fix insertion of wrong-dtyped NaT into Series[m8ns]
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index ba2838d59f814..7000c07b1f5a6 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -544,6 +544,10 @@ cpdef convert_scalar(ndarray arr, object value): pass elif isinstance(value, timedelta): return Timedelta(value).value + elif util.is_datetime64_object(value): + # exclude np.datetime64("NaT") which would otherwise be picked up + # by the `value != value check below + pass elif value is None or value != value: return NPY_NAT elif isinstance(value, str): diff --git a/pandas/core/series.py b/pandas/core/series.py index 6a58b1ea6f82d..dc9a42b7071cb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -46,6 +46,7 @@ ABCSparseSeries, ) from pandas.core.dtypes.missing import ( + is_valid_nat_for_dtype, isna, na_value_for_dtype, notna, @@ -1198,13 +1199,15 @@ def setitem(key, value): pass elif is_timedelta64_dtype(self.dtype): # reassign a null value to iNaT - if isna(value): + if is_valid_nat_for_dtype(value, self.dtype): + # exclude np.datetime64("NaT") value = iNaT try: self.index._engine.set_value(self._values, key, value) return - except TypeError: + except (TypeError, ValueError): + # ValueError appears in only some builds in CI pass self.loc[key] = value diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 6ff878f07da84..d73be76795c88 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -654,6 +654,36 @@ def test_timedelta_assignment(): tm.assert_series_equal(s, expected) +@pytest.mark.parametrize( + "nat_val,should_cast", + [ + (pd.NaT, True), + (np.timedelta64("NaT", "ns"), True), + (np.datetime64("NaT", "ns"), False), + ], +) +def test_td64_series_assign_nat(nat_val, should_cast): + # some nat-like values should be cast to timedelta64 when inserting + # into a timedelta64 series. Others should coerce to object + # and retain their dtypes. + base = pd.Series([0, 1, 2], dtype="m8[ns]") + expected = pd.Series([pd.NaT, 1, 2], dtype="m8[ns]") + if not should_cast: + expected = expected.astype(object) + + ser = base.copy(deep=True) + ser[0] = nat_val + tm.assert_series_equal(ser, expected) + + ser = base.copy(deep=True) + ser.loc[0] = nat_val + tm.assert_series_equal(ser, expected) + + ser = base.copy(deep=True) + ser.iloc[0] = nat_val + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize( "td", [
Broken off from #27311 to troubleshoot build-specific failures. Also rewrote the test to be parametrized and more succinct.
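Condensed, the behavior the parametrized test pins down (a sketch of the test above, not new API):

```python
import numpy as np
import pandas as pd

base = pd.Series([0, 1, 2], dtype="m8[ns]")

# pd.NaT and a timedelta64 NaT both cast into the timedelta64 series
ser = base.copy()
ser[0] = np.timedelta64("NaT", "ns")
assert ser.dtype == "timedelta64[ns]" and ser.isna()[0]

# a datetime64 NaT is the wrong kind of NaT: the series coerces to object
ser = base.copy()
ser[0] = np.datetime64("NaT", "ns")
assert ser.dtype == object
```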
https://api.github.com/repos/pandas-dev/pandas/pulls/27323
2019-07-10T14:52:06Z
2019-07-17T22:16:31Z
2019-07-17T22:16:31Z
2019-07-17T22:18:06Z
BUG: fix inserting tz-aware datetime to Series, closes #12862
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index ebe8b4770f6aa..2e7d5e95349be 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1053,6 +1053,7 @@ Indexing - Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` on a :class:`DataFrame` with a single timezone-aware datetime64[ns] column incorrectly returning a scalar instead of a :class:`Series` (:issue:`27110`) - Bug in :class:`CategoricalIndex` and :class:`Categorical` incorrectly raising ``ValueError`` instead of ``TypeError`` when a list is passed using the ``in`` operator (``__contains__``) (:issue:`21729`) - Bug in setting a new value in a :class:`Series` with a :class:`Timedelta` object incorrectly casting the value to an integer (:issue:`22717`) +- Bug in :class:`Series` setting a new key (``__setitem__``) with a timezone-aware datetime incorrectly raising ``ValueError`` (:issue:`12862`) - Missing diff --git a/pandas/core/series.py b/pandas/core/series.py index 4b78907e66106..acb0826953508 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1268,6 +1268,13 @@ def _set_with(self, key, value): except Exception: pass + if is_scalar(key) and not is_integer(key) and key not in self.index: + # GH#12862 adding an new key to the Series + # Note: have to exclude integers because that is ambiguously + # position-based + self.loc[key] = value + return + if is_scalar(key): key = [key] elif not isinstance(key, (list, Series, np.ndarray)): diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index d749e697c8282..90d1b0b1e0198 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -802,7 +802,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): assert is_scalar(result) and result == "Z" - def test_loc_coerceion(self): + def test_loc_coercion(self): # 12411 df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC"), pd.NaT]}) @@ -838,6 +838,26 @@ def test_loc_coerceion(self): result = df.iloc[3:] tm.assert_series_equal(result.dtypes, expected) + def test_setitem_new_key_tz(self): + # GH#12862 should not raise on assigning the second value + vals = [ + pd.to_datetime(42).tz_localize("UTC"), + pd.to_datetime(666).tz_localize("UTC"), + ] + expected = pd.Series(vals, index=["foo", "bar"]) + + ser = pd.Series() + ser["foo"] = vals[0] + ser["bar"] = vals[1] + + tm.assert_series_equal(ser, expected) + + ser = pd.Series() + ser.loc["foo"] = vals[0] + ser.loc["bar"] = vals[1] + + tm.assert_series_equal(ser, expected) + def test_loc_non_unique(self): # GH3659 # non-unique indexer with loc slice diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 1fb1dd3bb998a..6ff878f07da84 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -523,7 +523,7 @@ def test_setitem_with_tz_dst(): tm.assert_series_equal(s, exp) -def test_categorial_assigning_ops(): +def test_categorical_assigning_ops(): orig = Series(Categorical(["b", "b"], categories=["a", "b"])) s = orig.copy() s[:] = "a"
- [x] closes #12862 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
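Condensed from the new test, the previously-failing scenario — assigning a second tz-aware value under a new key raised `ValueError`:

```python
import pandas as pd

vals = [
    pd.to_datetime(42).tz_localize("UTC"),
    pd.to_datetime(666).tz_localize("UTC"),
]

ser = pd.Series()
ser["foo"] = vals[0]
ser["bar"] = vals[1]  # raised ValueError before this fix (GH 12862)

pd.testing.assert_series_equal(ser, pd.Series(vals, index=["foo", "bar"]))
```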
https://api.github.com/repos/pandas-dev/pandas/pulls/27322
2019-07-10T14:38:43Z
2019-07-11T16:39:34Z
2019-07-11T16:39:34Z
2019-07-11T16:44:28Z
BUG: Consistent division by zero behavior for Index/Series
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 042c97a0c98b1..ebe8b4770f6aa 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1009,6 +1009,7 @@ Numeric - Bug in :meth:`~pandas.eval` when comparing floats with scalar operators, for example: ``x < -0.1`` (:issue:`25928`) - Fixed bug where casting all-boolean array to integer extension array failed (:issue:`25211`) - Bug in ``divmod`` with a :class:`Series` object containing zeros incorrectly raising ``AttributeError`` (:issue:`26987`) +- Inconsistency in :class:`Series` floor-division (`//`) and ``divmod`` filling positive//zero with ``NaN`` instead of ``Inf`` (:issue:`27321`) - Conversion diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index d735ab3ad2535..ee5c670364485 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -234,32 +234,6 @@ def _gen_eval_kwargs(name): return kwargs -def _gen_fill_zeros(name): - """ - Find the appropriate fill value to use when filling in undefined values - in the results of the given operation caused by operating on - (generally dividing by) zero. - - Parameters - ---------- - name : str - - Returns - ------- - fill_value : {None, np.nan, np.inf} - """ - name = name.strip("__") - if "div" in name: - # truediv, floordiv, and reversed variants - fill_value = np.inf - elif "mod" in name: - # mod, rmod - fill_value = np.nan - else: - fill_value = None - return fill_value - - def _get_frame_op_default_axis(name): """ Only DataFrame cares about default_axis, specifically: @@ -1632,7 +1606,6 @@ def _arith_method_SERIES(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) - fill_zeros = _gen_fill_zeros(op_name) construct_result = ( _construct_divmod_result if op in [divmod, rdivmod] else _construct_result ) @@ -1663,7 +1636,7 @@ def na_op(x, y): except TypeError: result = masked_arith_op(x, y, op) - return missing.dispatch_fill_zeros(op, x, y, result, fill_zeros) + return missing.dispatch_fill_zeros(op, x, y, result) def wrapper(left, right): if isinstance(right, ABCDataFrame): @@ -2154,7 +2127,6 @@ def _arith_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) - fill_zeros = _gen_fill_zeros(op_name) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): @@ -2165,7 +2137,7 @@ def na_op(x, y): except TypeError: result = masked_arith_op(x, y, op) - return missing.dispatch_fill_zeros(op, x, y, result, fill_zeros) + return missing.dispatch_fill_zeros(op, x, y, result) if op_name in _op_descriptions: # i.e. include "add" but not "__add__" diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 608c2550994f1..3698958261555 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -27,7 +27,7 @@ from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_scalar -from .roperator import rdivmod +from .roperator import rdivmod, rfloordiv, rmod def fill_zeros(result, x, y, name, fill): @@ -85,7 +85,7 @@ def fill_zeros(result, x, y, name, fill): return result -def mask_zero_div_zero(x, y, result, copy=False): +def mask_zero_div_zero(x, y, result): """ Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes of the numerator or the denominator. 
@@ -95,9 +95,6 @@ def mask_zero_div_zero(x, y, result, copy=False): x : ndarray y : ndarray result : ndarray - copy : bool (default False) - Whether to always create a new array or try to fill in the existing - array if possible. Returns ------- @@ -113,10 +110,19 @@ def mask_zero_div_zero(x, y, result, copy=False): >>> mask_zero_div_zero(x, y, result) array([ inf, nan, -inf]) """ + if not isinstance(result, np.ndarray): + # FIXME: SparseArray would raise TypeError with np.putmask + return result + if is_scalar(y): y = np.array(y) zmask = y == 0 + + if isinstance(zmask, bool): + # FIXME: numpy did not evaluate pointwise, seen in docs build + return result + if zmask.any(): shape = result.shape @@ -125,12 +131,13 @@ def mask_zero_div_zero(x, y, result, copy=False): zpos_mask = zmask & ~zneg_mask nan_mask = (zmask & (x == 0)).ravel() - neginf_mask = ((zpos_mask & (x < 0)) | (zneg_mask & (x > 0))).ravel() - posinf_mask = ((zpos_mask & (x > 0)) | (zneg_mask & (x < 0))).ravel() + with np.errstate(invalid="ignore"): + neginf_mask = ((zpos_mask & (x < 0)) | (zneg_mask & (x > 0))).ravel() + posinf_mask = ((zpos_mask & (x > 0)) | (zneg_mask & (x < 0))).ravel() if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN - result = result.astype("float64", copy=copy).ravel() + result = result.astype("float64", copy=False).ravel() np.putmask(result, nan_mask, np.nan) np.putmask(result, posinf_mask, np.inf) @@ -157,36 +164,45 @@ def dispatch_missing(op, left, right, result): ------- result : ndarray """ - opstr = "__{opname}__".format(opname=op.__name__).replace("____", "__") if op is operator.floordiv: # Note: no need to do this for truediv; in py3 numpy behaves the way # we want. result = mask_zero_div_zero(left, right, result) elif op is operator.mod: - result = fill_zeros(result, left, right, opstr, np.nan) + result = fill_zeros(result, left, right, "__mod__", np.nan) elif op is divmod: res0 = mask_zero_div_zero(left, right, result[0]) - res1 = fill_zeros(result[1], left, right, opstr, np.nan) + res1 = fill_zeros(result[1], left, right, "__divmod__", np.nan) result = (res0, res1) return result # FIXME: de-duplicate with dispatch_missing -def dispatch_fill_zeros(op, left, right, result, fill_value): +def dispatch_fill_zeros(op, left, right, result): """ Call fill_zeros with the appropriate fill value depending on the operation, with special logic for divmod and rdivmod. """ if op is divmod: result = ( - fill_zeros(result[0], left, right, "__floordiv__", np.inf), + mask_zero_div_zero(left, right, result[0]), fill_zeros(result[1], left, right, "__mod__", np.nan), ) elif op is rdivmod: result = ( - fill_zeros(result[0], left, right, "__rfloordiv__", np.inf), + mask_zero_div_zero(right, left, result[0]), fill_zeros(result[1], left, right, "__rmod__", np.nan), ) - else: - result = fill_zeros(result, left, right, op.__name__, fill_value) + elif op is operator.floordiv: + # Note: no need to do this for truediv; in py3 numpy behaves the way + # we want. + result = mask_zero_div_zero(left, right, result) + elif op is op is rfloordiv: + # Note: no need to do this for rtruediv; in py3 numpy behaves the way + # we want. 
+ result = mask_zero_div_zero(right, left, result) + elif op is operator.mod: + result = fill_zeros(result, left, right, "__mod__", np.nan) + elif op is rmod: + result = fill_zeros(result, left, right, "__rmod__", np.nan) return result diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 1fbecbab469e4..2b23790e4ccd3 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -331,7 +331,12 @@ def test_ser_divmod_zero(self, dtype1, any_real_dtype): left = pd.Series([1, 1]).astype(dtype1) right = pd.Series([0, 2]).astype(dtype2) + # GH#27321 pandas convention is to set 1 // 0 to np.inf, as opposed + # to numpy which sets to np.nan; patch `expected[0]` below expected = left // right, left % right + expected = list(expected) + expected[0] = expected[0].astype(np.float64) + expected[0][0] = np.inf result = divmod(left, right) tm.assert_series_equal(result[0], expected[0]) @@ -881,17 +886,16 @@ def check(series, other): _check_op(series, other, operator.pow, pos_only=True) - _check_op(series, other, lambda x, y: operator.add(y, x)) - _check_op(series, other, lambda x, y: operator.sub(y, x)) - _check_op(series, other, lambda x, y: operator.truediv(y, x)) - _check_op(series, other, lambda x, y: operator.floordiv(y, x)) - _check_op(series, other, lambda x, y: operator.mul(y, x)) - _check_op(series, other, lambda x, y: operator.pow(y, x), pos_only=True) - _check_op(series, other, lambda x, y: operator.mod(y, x)) + _check_op(series, other, ops.radd) + _check_op(series, other, ops.rsub) + _check_op(series, other, ops.rtruediv) + _check_op(series, other, ops.rfloordiv) + _check_op(series, other, ops.rmul) + _check_op(series, other, ops.rpow, pos_only=True) + _check_op(series, other, ops.rmod) tser = tm.makeTimeSeries().rename("ts") check(tser, tser * 2) - check(tser, tser * 0) check(tser, tser[::2]) check(tser, 5) @@ -931,13 +935,9 @@ def check(series, other): tser = tm.makeTimeSeries().rename("ts") check(tser, tser * 2) - check(tser, tser * 0) check(tser, tser[::2]) check(tser, 5) - @pytest.mark.xfail( - reason="Series division does not yet fill 1/0 consistently; Index does." - ) def test_series_divmod_zero(self): # Check that divmod uses pandas convention for division by zero, # which does not match numpy. 
@@ -950,8 +950,8 @@ def test_series_divmod_zero(self): other = tser * 0 result = divmod(tser, other) - exp1 = pd.Series([np.inf] * len(tser), index=tser.index) - exp2 = pd.Series([np.nan] * len(tser), index=tser.index) + exp1 = pd.Series([np.inf] * len(tser), index=tser.index, name="ts") + exp2 = pd.Series([np.nan] * len(tser), index=tser.index, name="ts") tm.assert_series_equal(result[0], exp1) tm.assert_series_equal(result[1], exp2) diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 0f8f3d261c3b3..57e5a35d99e48 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -49,6 +49,12 @@ def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op): else: expected = op(a_dense, b_dense) + if op in [operator.floordiv, ops.rfloordiv]: + # Series sets 1//0 to np.inf, which SparseArray does not do (yet) + mask = np.isinf(expected) + if mask.any(): + expected[mask] = np.nan + self._assert(result, expected) def _check_bool_result(self, res): diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index dfdb08fa78cbc..8fbfb4c12f4b2 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -179,6 +179,10 @@ def _check_op_float(self, result, expected, mask, s, op_name, other): # check comparisons that are resulting in float dtypes expected[mask] = np.nan + if "floordiv" in op_name: + # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet) + mask2 = np.isinf(expected) & np.isnan(result) + expected[mask2] = np.nan tm.assert_series_equal(result, expected) def _check_op_integer(self, result, expected, mask, s, op_name, other): diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 64c81a8c10985..5682c74a8b692 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -1,4 +1,5 @@ import operator +from types import LambdaType import numpy as np from numpy import nan @@ -9,6 +10,7 @@ import pandas as pd from pandas import DataFrame, Series, bdate_range, compat +from pandas.core import ops from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.sparse import frame as spf from pandas.core.sparse.api import ( @@ -424,6 +426,13 @@ def _compare_to_dense(a, b, da, db, op): sparse_result = op(a, b) dense_result = op(da, db) + # catch lambdas but not non-lambdas e.g. 
operator.add + if op in [operator.floordiv, ops.rfloordiv] or isinstance(op, LambdaType): + # GH#27231 Series sets 1//0 to np.inf, which SparseArray + # does not do (yet) + mask = np.isinf(dense_result) & ~np.isinf(sparse_result.to_dense()) + dense_result[mask] = np.nan + fill = sparse_result.default_fill_value dense_result = dense_result.to_sparse(fill_value=fill) tm.assert_sp_frame_equal(sparse_result, dense_result, exact_indices=False) @@ -436,7 +445,6 @@ def _compare_to_dense(a, b, da, db, op): ) opnames = ["add", "sub", "mul", "truediv", "floordiv"] - ops = [getattr(operator, name) for name in opnames] fidx = frame.index @@ -466,6 +474,7 @@ def _compare_to_dense(a, b, da, db, op): f = lambda a, b: getattr(a, op)(b, axis="index") _compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f) + # FIXME: dont leave commented-out # rops are not implemented # _compare_to_dense(s, frame, s.to_dense(), # frame.to_dense(), f) @@ -479,13 +488,14 @@ def _compare_to_dense(a, b, da, db, op): frame.xs(fidx[5])[:2], ] - for op in ops: + for name in opnames: + op = getattr(operator, name) for s in series: _compare_to_dense(frame, s, frame.to_dense(), s, op) _compare_to_dense(s, frame, s, frame.to_dense(), op) # it works! - result = frame + frame.loc[:, ["A", "B"]] # noqa + frame + frame.loc[:, ["A", "B"]] def test_op_corners(self, float_frame, empty_frame): empty = empty_frame + empty_frame diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index ad4c898b004ac..fb668f3d0e76d 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -12,6 +12,7 @@ import pandas as pd from pandas import DataFrame, Series, SparseDtype, SparseSeries, bdate_range, isna +from pandas.core import ops from pandas.core.reshape.util import cartesian_product import pandas.core.sparse.frame as spf from pandas.tests.series.test_api import SharedWithSparse @@ -563,6 +564,10 @@ def _check_op(a, b, op): adense = a.to_dense() if isinstance(a, SparseSeries) else a bdense = b.to_dense() if isinstance(b, SparseSeries) else b dense_result = op(adense, bdense) + if "floordiv" in op.__name__: + # Series sets 1//0 to np.inf, which SparseSeries does not do (yet) + mask = np.isinf(dense_result) + dense_result[mask] = np.nan tm.assert_almost_equal(sp_result.to_dense(), dense_result) def check(a, b): @@ -572,11 +577,11 @@ def check(a, b): _check_op(a, b, operator.floordiv) _check_op(a, b, operator.mul) - _check_op(a, b, lambda x, y: operator.add(y, x)) - _check_op(a, b, lambda x, y: operator.sub(y, x)) - _check_op(a, b, lambda x, y: operator.truediv(y, x)) - _check_op(a, b, lambda x, y: operator.floordiv(y, x)) - _check_op(a, b, lambda x, y: operator.mul(y, x)) + _check_op(a, b, ops.radd) + _check_op(a, b, ops.rsub) + _check_op(a, b, ops.rtruediv) + _check_op(a, b, ops.rfloordiv) + _check_op(a, b, ops.rmul) # FIXME: don't leave commented-out # NaN ** 0 = 1 in C?
- [x] closes #13843 We have two very similar functions for masking division by zero for Index vs Series. The Index version is the more correct of the two, so I'm trying to make the Series one behave more like the Index one, eventually just using the Index one for both. The hard part here is that IntegerArray, SparseArray, and SparseSeries have their own arithmetic implementations, and we test that they behave the same as Series. Several attempts to patch these implementations have failed (and motivated #27302, which this is rebased on top of), so for now this PR patches the tests instead of the implementations. I think the IntegerArray implementation should be relatively easy to patch. For SparseArray I'll need some help (cc @TomAugspurger) with `__setitem__` and/or `putmask`.
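A minimal sketch of the masking convention being unified (simplified from `mask_zero_div_zero` above; the real function also handles scalars, negative zero, and reshaping):

```python
import numpy as np

def mask_zero_div_zero(x, y, result):
    # pandas convention for division by zero, regardless of input dtype:
    #   0 // 0 -> NaN, positive // 0 -> inf, negative // 0 -> -inf
    zmask = y == 0
    if zmask.any():
        result = result.astype("float64")  # astype copies, safe to mutate
        result[zmask & (x == 0)] = np.nan
        result[zmask & (x > 0)] = np.inf
        result[zmask & (x < 0)] = -np.inf
    return result

x = np.array([1, 0, -1])
y = np.array([0, 0, 0])
with np.errstate(divide="ignore", invalid="ignore"):
    out = mask_zero_div_zero(x, y, x // y)
# out: array([ inf,  nan, -inf]), matching the Index behavior
```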
https://api.github.com/repos/pandas-dev/pandas/pulls/27321
2019-07-10T14:27:59Z
2019-07-11T14:32:15Z
2019-07-11T14:32:14Z
2019-07-11T14:34:54Z
TYPING: Partial typing of Categorical
diff --git a/pandas/_typing.py b/pandas/_typing.py index 45c43fa958caa..837a7a89e0b83 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import IO, TYPE_CHECKING, AnyStr, TypeVar, Union +from typing import IO, TYPE_CHECKING, AnyStr, Optional, TypeVar, Union import numpy as np @@ -27,3 +27,4 @@ FrameOrSeries = TypeVar("FrameOrSeries", "Series", "DataFrame") Scalar = Union[str, int, float] Axis = Union[str, int] +Ordered = Optional[bool] diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 6200cd14663f8..50ea8d2f4d7a7 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1,5 +1,6 @@ from shutil import get_terminal_size import textwrap +from typing import Type, Union, cast from warnings import warn import numpy as np @@ -47,6 +48,7 @@ from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna +from pandas._typing import ArrayLike, Dtype, Ordered from pandas.core import ops from pandas.core.accessor import PandasDelegate, delegate_names import pandas.core.algorithms as algorithms @@ -473,7 +475,7 @@ def categories(self, categories): self._dtype = new_dtype @property - def ordered(self): + def ordered(self) -> Ordered: """ Whether the categories have an ordered relationship. """ @@ -487,11 +489,11 @@ def dtype(self) -> CategoricalDtype: return self._dtype @property - def _ndarray_values(self): + def _ndarray_values(self) -> np.ndarray: return self.codes @property - def _constructor(self): + def _constructor(self) -> Type["Categorical"]: return Categorical @classmethod @@ -502,7 +504,7 @@ def _formatter(self, boxed=False): # Defer to CategoricalFormatter's formatter. return None - def copy(self): + def copy(self) -> "Categorical": """ Copy constructor. """ @@ -510,7 +512,7 @@ def copy(self): values=self._codes.copy(), dtype=self.dtype, fastpath=True ) - def astype(self, dtype, copy=True): + def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: """ Coerce this type to another dtype @@ -523,6 +525,8 @@ def astype(self, dtype, copy=True): object is returned. """ if is_categorical_dtype(dtype): + dtype = cast(Union[str, CategoricalDtype], dtype) + # GH 10696/18593 dtype = self.dtype.update_dtype(dtype) self = self.copy() if copy else self @@ -532,27 +536,27 @@ def astype(self, dtype, copy=True): return np.array(self, dtype=dtype, copy=copy) @cache_readonly - def ndim(self): + def ndim(self) -> int: """ Number of dimensions of the Categorical """ return self._codes.ndim @cache_readonly - def size(self): + def size(self) -> int: """ return the len of myself """ return len(self) @cache_readonly - def itemsize(self): + def itemsize(self) -> int: """ return the size of a single category """ return self.categories.itemsize - def tolist(self): + def tolist(self) -> list: """ Return a list of the values. @@ -565,7 +569,7 @@ def tolist(self): to_list = tolist @property - def base(self): + def base(self) -> None: """ compat, we are always our own object """ @@ -712,8 +716,6 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): return cls(codes, dtype=dtype, fastpath=True) - _codes = None - def _get_codes(self): """ Get the codes. 
@@ -773,7 +775,7 @@ def _set_categories(self, categories, fastpath=False): self._dtype = new_dtype - def _set_dtype(self, dtype): + def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical": """ Internal method for directly updating the CategoricalDtype diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index bba551bd30a2d..ee1866e60644b 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -1,6 +1,6 @@ """ define extension dtypes """ import re -from typing import Any, Dict, List, Optional, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast import warnings import numpy as np @@ -11,6 +11,8 @@ from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCDateOffset, ABCIndexClass +from pandas._typing import Ordered + from .base import ExtensionDtype from .inference import is_bool, is_list_like @@ -20,9 +22,6 @@ # CategoricalDtype constructor to detect when ordered=None is explicitly passed ordered_sentinel = object() # type: object -# TODO(GH26403): Replace with Optional[bool] or bool -OrderedType = Union[None, bool, object] - def register_extension_dtype(cls: Type[ExtensionDtype],) -> Type[ExtensionDtype]: """ @@ -222,7 +221,11 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): _metadata = ("categories", "ordered", "_ordered_from_sentinel") _cache = {} # type: Dict[str_type, PandasExtensionDtype] - def __init__(self, categories=None, ordered: OrderedType = ordered_sentinel): + def __init__( + self, categories=None, ordered: Union[Ordered, object] = ordered_sentinel + ): + # TODO(GH26403): Set type of ordered to Ordered + ordered = cast(Ordered, ordered) self._finalize(categories, ordered, fastpath=False) @classmethod @@ -235,7 +238,7 @@ def _from_fastpath( @classmethod def _from_categorical_dtype( - cls, dtype: "CategoricalDtype", categories=None, ordered: OrderedType = None + cls, dtype: "CategoricalDtype", categories=None, ordered: Ordered = None ) -> "CategoricalDtype": if categories is ordered is None: return dtype @@ -336,9 +339,7 @@ def _from_values_or_dtype( return dtype - def _finalize( - self, categories, ordered: OrderedType, fastpath: bool = False - ) -> None: + def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None: if ordered is not None and ordered is not ordered_sentinel: self.validate_ordered(ordered) @@ -423,7 +424,7 @@ def __repr__(self): return tpl.format(data, self._ordered) @staticmethod - def _hash_categories(categories, ordered: OrderedType = True) -> int: + def _hash_categories(categories, ordered: Ordered = True) -> int: from pandas.core.util.hashing import ( hash_array, _combine_hash_arrays, @@ -475,7 +476,7 @@ def construct_array_type(cls): return Categorical @staticmethod - def validate_ordered(ordered: OrderedType) -> None: + def validate_ordered(ordered: Ordered) -> None: """ Validates that we have a valid ordered parameter. If it is not a boolean, a TypeError will be raised. 
@@ -529,7 +530,9 @@ def validate_categories(categories, fastpath: bool = False): return categories - def update_dtype(self, dtype: "CategoricalDtype") -> "CategoricalDtype": + def update_dtype( + self, dtype: Union[str_type, "CategoricalDtype"] + ) -> "CategoricalDtype": """ Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified @@ -551,6 +554,9 @@ def update_dtype(self, dtype: "CategoricalDtype") -> "CategoricalDtype": "got {dtype!r}" ).format(dtype=dtype) raise ValueError(msg) + else: + # from here on, dtype is a CategoricalDtype + dtype = cast(CategoricalDtype, dtype) # dtype is CDT: keep current categories/ordered if None new_categories = dtype.categories @@ -583,7 +589,7 @@ def categories(self): return self._categories @property - def ordered(self) -> OrderedType: + def ordered(self) -> Ordered: """ Whether the categories have an ordered relationship. """
Adds some typing to ``Categorical``. This PR does the simpler typing of ``Categorical``; there is some thornier typing that I've stayed away from for now, to see if others can implement the basic types (list-likes etc.).
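A toy illustration of the sentinel-plus-`cast` pattern the PR uses in `CategoricalDtype.__init__` (the `finalize` helper here is hypothetical): the parameter is widened to accept the sentinel, then narrowed back to `Ordered` for mypy:

```python
from typing import Optional, Union, cast

Ordered = Optional[bool]     # the alias this PR adds to pandas._typing
ordered_sentinel = object()  # distinguishes "not passed" from None

def finalize(ordered: Union[Ordered, object] = ordered_sentinel) -> Ordered:
    if ordered is ordered_sentinel:
        return None                # caller never passed ordered
    return cast(Ordered, ordered)  # mypy now treats it as Optional[bool]
```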
https://api.github.com/repos/pandas-dev/pandas/pulls/27318
2019-07-10T06:29:45Z
2019-07-25T17:33:53Z
2019-07-25T17:33:53Z
2019-07-25T17:34:00Z
BUG: Preserve CategoricalDtype._ordered_from_sentinel with pickle
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 1cf452b4a6c2c..7721c90c9b4b4 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -219,7 +219,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): kind = "O" # type: str_type str = "|O08" base = np.dtype("O") - _metadata = ("categories", "ordered") + _metadata = ("categories", "ordered", "_ordered_from_sentinel") _cache = {} # type: Dict[str_type, PandasExtensionDtype] def __init__(self, categories=None, ordered: OrderedType = ordered_sentinel): @@ -356,6 +356,7 @@ def __setstate__(self, state: Dict[str_type, Any]) -> None: # pickle -> need to set the settable private ones here (see GH26067) self._categories = state.pop("categories", None) self._ordered = state.pop("ordered", False) + self._ordered_from_sentinel = state.pop("_ordered_from_sentinel", False) def __hash__(self) -> int: # _hash_categories returns a uint64, so use the negative diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index a81c57537408c..d3f0d7c43ee6b 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -903,6 +903,19 @@ def test_ordered_none_default_deprecated(self, ordered): with tm.assert_produces_warning(warning): dtype.ordered + @pytest.mark.parametrize("ordered", [True, False, None, ordered_sentinel]) + def test_pickle_ordered_from_sentinel(self, ordered): + # GH 27295: can remove test when _ordered_from_sentinel is removed (GH 26403) + dtype = CategoricalDtype(categories=list("abc"), ordered=ordered) + + warning = FutureWarning if ordered is ordered_sentinel else None + with tm.assert_produces_warning(warning, check_stacklevel=False): + dtype_from_pickle = tm.round_trip_pickle(dtype) + + result = dtype_from_pickle._ordered_from_sentinel + expected = ordered is ordered_sentinel + assert result is expected + @pytest.mark.parametrize( "dtype", [CategoricalDtype, IntervalDtype, DatetimeTZDtype, PeriodDtype] diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 0238314122462..5389390501b32 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -226,6 +226,15 @@ def test_pickle_preserve_name(self): unpickled = self._pickle_roundtrip_name(tm.makeTimeSeries(name=n)) assert unpickled.name == n + def test_pickle_categorical_ordered_from_sentinel(self): + # GH 27295: can remove test when _ordered_from_sentinel is removed (GH 26403) + s = Series(["a", "b", "c", "a"], dtype="category") + result = tm.round_trip_pickle(s) + result = result.astype("category") + + tm.assert_series_equal(result, s) + assert result.dtype._ordered_from_sentinel is False + def _pickle_roundtrip_name(self, obj): with ensure_clean() as path:
- [X] closes #27295 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
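The regression this guards against, condensed (`_ordered_from_sentinel` is a private flag slated for removal in GH 26403):

```python
import pickle
import pandas as pd

dtype = pd.CategoricalDtype(categories=list("abc"), ordered=True)
result = pickle.loads(pickle.dumps(dtype))

# ordered was passed explicitly, so the flag must round-trip as False;
# before this fix __setstate__ did not restore it at all
assert result._ordered_from_sentinel is False
```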
https://api.github.com/repos/pandas-dev/pandas/pulls/27317
2019-07-10T04:44:36Z
2019-07-10T16:39:22Z
2019-07-10T16:39:22Z
2019-07-10T16:49:34Z
BUG: Fix inserting of wrong-dtyped NaT, closes #27297
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 9caf127553e05..87d3e47cbefe5 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -87,7 +87,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - +- Bug in :meth:`Series.__setitem__` incorrectly casting ``np.timedelta64("NaT")`` to ``np.datetime64("NaT")`` when inserting into a :class:`Series` with datetime64 dtype (:issue:`27311`) - - diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 7000c07b1f5a6..13812663dd907 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -533,6 +533,9 @@ cpdef convert_scalar(ndarray arr, object value): pass elif isinstance(value, (datetime, np.datetime64, date)): return Timestamp(value).value + elif util.is_timedelta64_object(value): + # exclude np.timedelta64("NaT") from value != value below + pass elif value is None or value != value: return NPY_NAT elif isinstance(value, str): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ace57938f948c..e352444a98bbe 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2589,8 +2589,9 @@ def setitem(self, indexer, value): try: return super().setitem(indexer, value) except (ValueError, TypeError): + obj_vals = self.values.astype(object) newb = make_block( - self.values.astype(object), placement=self.mgr_locs, klass=ObjectBlock + obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim ) return newb.setitem(indexer, value) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index d73be76795c88..2d36bfdb93a17 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -654,6 +654,38 @@ def test_timedelta_assignment(): tm.assert_series_equal(s, expected) +@pytest.mark.parametrize( + "nat_val,should_cast", + [ + (pd.NaT, True), + (np.timedelta64("NaT", "ns"), False), + (np.datetime64("NaT", "ns"), True), + ], +) +@pytest.mark.parametrize("tz", [None, "UTC"]) +def test_dt64_series_assign_nat(nat_val, should_cast, tz): + # some nat-like values should be cast to datetime64 when inserting + # into a datetime64 series. Others should coerce to object + # and retain their dtypes. + dti = pd.date_range("2016-01-01", periods=3, tz=tz) + base = pd.Series(dti) + expected = pd.Series([pd.NaT] + list(dti[1:]), dtype=dti.dtype) + if not should_cast: + expected = expected.astype(object) + + ser = base.copy(deep=True) + ser[0] = nat_val + tm.assert_series_equal(ser, expected) + + ser = base.copy(deep=True) + ser.loc[0] = nat_val + tm.assert_series_equal(ser, expected) + + ser = base.copy(deep=True) + ser.iloc[0] = nat_val + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize( "nat_val,should_cast", [
- [x] closes #27297 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Needs tests for the PeriodDtype case. The tests are probably not in the ideal place; how to move/parametrize them depends on what axis we want to sort them along (i.e. all Series indexing tests together, or all timedelta-insertion tests together).
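This is the datetime64 mirror image of the timedelta64 case from #27323; condensed from the new test:

```python
import numpy as np
import pandas as pd

base = pd.Series(pd.date_range("2016-01-01", periods=3))

ser = base.copy()
ser[0] = np.datetime64("NaT", "ns")   # right flavor: stays datetime64[ns]
assert ser.dtype == "datetime64[ns]"

ser = base.copy()
ser[0] = np.timedelta64("NaT", "ns")  # wrong flavor: coerces to object
assert ser.dtype == object
```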
https://api.github.com/repos/pandas-dev/pandas/pulls/27311
2019-07-09T18:30:44Z
2019-07-22T17:03:57Z
2019-07-22T17:03:56Z
2019-07-22T23:22:38Z
ENH: Preserve key order when passing list of dicts to DataFrame on py 3.6+
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 193a0edee5e96..805fe21bdcc9d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -400,7 +400,7 @@ of ``object`` dtype. :attr:`Series.str` will now infer the dtype data *within* t .. _whatsnew_0250.api_breaking.groupby_categorical: Categorical dtypes are preserved during groupby -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Previously, columns that were categorical, but not the groupby key(s) would be converted to ``object`` dtype during groupby operations. Pandas now will preserve these dtypes. (:issue:`18502`) @@ -741,6 +741,47 @@ consistent with NumPy and the rest of pandas (:issue:`21801`). cat.argsort() cat[cat.argsort()] +.. _whatsnew_0250.api_breaking.list_of_dict: + +Column order is preserved when passing a list of dicts to DataFrame +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting with Python 3.7 the key-order of ``dict`` is `guaranteed <https://mail.python.org/pipermail/python-dev/2017-December/151283.html>`_. In practice, this has been true since +Python 3.6. The :class:`DataFrame` constructor now treats a list of dicts in the same way as +it does a list of ``OrderedDict``, i.e. preserving the order of the dicts. +This change applies only when pandas is running on Python>=3.6 (:issue:`27309`). + +.. ipython:: python + + data = [ + {'name': 'Joe', 'state': 'NY', 'age': 18}, + {'name': 'Jane', 'state': 'KY', 'age': 19, 'hobby': 'Minecraft'}, + {'name': 'Jean', 'state': 'OK', 'age': 20, 'finances': 'good'} + ] + +*Previous Behavior*: + +The columns were lexicographically sorted previously, + +.. code-block:: python + + In [1]: pd.DataFrame(data) + Out[1]: + age finances hobby name state + 0 18 NaN NaN Joe NY + 1 19 NaN Minecraft Jane KY + 2 20 good NaN Jean OK + +*New Behavior*: + +The column order now matches the insertion-order of the keys in the ``dict``, +considering all the records from top to bottom. As a consequence, the column +order of the resulting DataFrame has changed compared to previous pandas verisons. + +.. ipython:: python + + pd.DataFrame(data) + .. _whatsnew_0250.api_breaking.deps: Increased minimum versions for dependencies diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a1989fd62b6ee..a4d355de3d8f0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -313,8 +313,12 @@ class DataFrame(NDFrame): Dict can contain Series, arrays, constants, or list-like objects .. versionchanged :: 0.23.0 - If data is a dict, argument order is maintained for Python 3.6 - and later. + If data is a dict, column order follows insertion-order for + Python 3.6 and later. + + .. versionchanged :: 0.25.0 + If data is a list of dicts, column order follows insertion-order + Python 3.6 and later. index : Index or array-like Index to use for resulting frame. 
Will default to RangeIndex if diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index b4752039cf5b1..f44cb5207891f 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -10,7 +10,7 @@ from pandas._libs import lib from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime import pandas.compat as compat -from pandas.compat import raise_with_traceback +from pandas.compat import PY36, raise_with_traceback from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, @@ -536,9 +536,30 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): + """Convert list of dicts to numpy arrays + + if `columns` is not passed, column names are inferred from the records + - for OrderedDict and (on Python>=3.6) dicts, the column names match + the key insertion-order from the first record to the last. + - For other kinds of dict-likes, the keys are lexically sorted. + + Parameters + ---------- + data : iterable + collection of records (OrderedDict, dict) + columns: iterables or None + coerce_float : bool + dtype : np.dtype + + Returns + ------- + tuple + arrays, columns + """ if columns is None: gen = (list(x.keys()) for x in data) - sort = not any(isinstance(d, OrderedDict) for d in data) + types = (dict, OrderedDict) if PY36 else OrderedDict + sort = not any(isinstance(d, types) for d in data) columns = lib.fast_unique_multiple_list_gen(gen, sort=sort) # assure that they are of the base dict class and not of derived diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index eca827f82e296..736258899a41e 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1119,7 +1119,7 @@ def test_constructor_generator(self): expected = DataFrame({0: range(10), 1: "a"}) tm.assert_frame_equal(result, expected, check_dtype=False) - def test_constructor_list_of_dicts(self): + def test_constructor_list_of_odicts(self): data = [ OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]), OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]), @@ -1340,6 +1340,26 @@ def test_constructor_list_of_namedtuples(self): result = DataFrame(tuples, columns=["y", "z"]) tm.assert_frame_equal(result, expected) + def test_constructor_list_of_dict_order(self): + # GH10056 + data = [ + {"First": 1, "Second": 4, "Third": 7, "Fourth": 10}, + {"Second": 5, "First": 2, "Fourth": 11, "Third": 8}, + {"Second": 6, "First": 3, "Fourth": 12, "Third": 9, "YYY": 14, "XXX": 13}, + ] + expected = DataFrame( + { + "First": [1, 2, 3], + "Second": [4, 5, 6], + "Third": [7, 8, 9], + "Fourth": [10, 11, 12], + "YYY": [None, None, 14], + "XXX": [None, None, 13], + } + ) + result = DataFrame(data) + tm.assert_frame_equal(result, expected, check_like=not PY36) + def test_constructor_orient(self, float_string_frame): data_dict = float_string_frame.T._series recons = DataFrame.from_dict(data_dict, orient="index") diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index e06047b52ac15..f6bb5f774e758 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -7,6 +7,8 @@ import numpy as np import pytest +from pandas.compat import PY36 + from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import pandas as pd @@ -230,8 +232,10 @@ def test_setitem_dtype_upcast(self): assert df["c"].dtype 
== np.float64 df.loc[0, "c"] = "foo" - expected = DataFrame([{"a": 1, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]) - tm.assert_frame_equal(df, expected) + expected = DataFrame( + [{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}] + ) + tm.assert_frame_equal(df, expected, check_like=not PY36) # GH10280 df = DataFrame( diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index a32103d7b29b9..3ceddfc3c1db4 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat import PY36 + from pandas import DataFrame, Index import pandas.util.testing as tm @@ -351,9 +353,9 @@ def test_non_ascii_key(self): ).decode("utf8") testdata = { + b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1], "sub.A": [1, 3], "sub.B": [2, 4], - b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1], } expected = DataFrame(testdata) @@ -366,21 +368,21 @@ def test_missing_field(self, author_missing_data): ex_data = [ { "info": np.nan, - "author_name.first": np.nan, - "author_name.last_name": np.nan, "info.created_at": np.nan, "info.last_updated": np.nan, + "author_name.first": np.nan, + "author_name.last_name": np.nan, }, { "info": None, - "author_name.first": "Jane", - "author_name.last_name": "Doe", "info.created_at": "11/08/1993", "info.last_updated": "26/05/2012", + "author_name.first": "Jane", + "author_name.last_name": "Doe", }, ] expected = DataFrame(ex_data) - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_like=not PY36) @pytest.mark.parametrize( "max_level,expected", @@ -508,12 +510,13 @@ def test_missing_meta(self, missing_metadata): data=missing_metadata, record_path="addresses", meta="name", errors="ignore" ) ex_data = [ - ["Massillon", 9562, "OH", "Morris St.", 44646, "Alice"], - ["Elizabethton", 8449, "TN", "Spring St.", 37643, np.nan], + [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"], + [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan], ] columns = ["city", "number", "state", "street", "zip", "name"] + columns = ["number", "street", "city", "state", "zip", "name"] expected = DataFrame(ex_data, columns=columns) - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_like=not PY36) def test_donot_drop_nonevalues(self): # GH21356 @@ -684,7 +687,7 @@ def test_with_large_max_level(self): "CreatedBy.user.family_tree.father.name": "Father001", "CreatedBy.user.family_tree.father.father.Name": "Father002", "CreatedBy.user.family_tree.father.father.father.name": "Father003", - "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", + "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501 } ] output = nested_to_record(input_data, max_level=max_level)
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Related to #25915, #26587, #24859, #26113, #10056 (OrderedDicts), #11181/#11416 (list of namedtuple). #25911. **Update**: #13304/#13309 was merged three years ago, so let's just make a list of dicts act like a list of OrderedDict (as pointed out by @jorisvandenbossche). **Actual** ```python In [63]: data= [ ...: {'name': 'Joe', 'state': 'NY', 'age': 18}, ...: {'name': 'Jane', 'state': 'KY', 'age': 19} ...: ] ...: pd.DataFrame(data) Out[63]: age name state 0 18 Joe NY 1 19 Jane KY ``` **Expected** ``` In [64]: pd.DataFrame(data) Out[64]: name state age 0 Joe NY 18 1 Jane KY 19 ``` <del> Four years ago, #10056 asked for the implied order of columns in a list of `OrderedDict` to be preserved by the `DataFrame` constructor. @thatneat [commented](https://github.com/pandas-dev/pandas/issues/10056#issuecomment-509383829) yesterday that with 3.7's guaranteed dict order, this should extend to dict-likes in general. I think users have a reasonable expectation for this to work, and therefore that pandas should support it. @jreback [voted](https://github.com/pandas-dev/pandas/issues/10056#issuecomment-98812435) +0 on adding this (four years ago). `namedtuple` has the convenient property of homogeneous keys and key-order, which a list of dicts doesn't have: dicts are allowed to omit keys, and the key order may also change from dict to dict. Given that, I settled on a reasonable compromise that matches user expectations in practice: 1. Only look at the first dict in the list. 2. Only guarantee the column order of the keys which actually appear in it. 3. **Clarification**: the order among columns not included in the first dict is undefined, except that they will appear after all the columns that do. 4. **Added**: changes apply to Python 3.6+ only. In practice, I think the only case users actually care about is sensible behavior when passing a list of dicts which is homogeneous in terms of keys and key-order, which this PR provides. </del>
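For reference, here is a minimal pure-Python sketch of the column-inference rule this PR settles on. The actual implementation defers to the Cython helper `lib.fast_unique_multiple_list_gen` shown in the diff; `infer_columns` is a hypothetical name used only for illustration.

```python
import sys
from collections import OrderedDict

PY36 = sys.version_info >= (3, 6)


def infer_columns(records):
    """Sketch of how column names are inferred from a list of records."""
    # On Python >= 3.6 plain dicts preserve insertion order, so they are
    # trusted just like OrderedDict; on older versions only OrderedDict is.
    ordered_types = (dict, OrderedDict) if PY36 else (OrderedDict,)
    sort = not any(isinstance(r, ordered_types) for r in records)
    columns = []
    for record in records:
        for key in record:  # keys come out in each record's insertion order
            if key not in columns:
                columns.append(key)
    return sorted(columns) if sort else columns


data = [
    {"name": "Joe", "state": "NY", "age": 18},
    {"name": "Jane", "state": "KY", "age": 19, "hobby": "Minecraft"},
]
print(infer_columns(data))  # ['name', 'state', 'age', 'hobby'] on Python 3.6+
```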
https://api.github.com/repos/pandas-dev/pandas/pulls/27309
2019-07-09T17:51:59Z
2019-07-17T11:46:54Z
2019-07-17T11:46:54Z
2019-08-02T14:53:44Z
STYLE: fix line length check of flake8
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 818d844ca7994..3cf358261e685 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -284,7 +284,7 @@ def __init__( if self.name is None: self.name = index.names[level] - self.grouper, self._labels, self._group_index = index._get_grouper_for_level( + self.grouper, self._labels, self._group_index = index._get_grouper_for_level( # noqa: E501 self.grouper, level ) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e27f85eb6d0a4..612a857897a0c 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1230,8 +1230,8 @@ def _validate_read_indexer(self, key, indexer, axis, raise_missing=False): KeyError in the future, you can use .reindex() as an alternative. See the documentation here: - https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike""" - ) # noqa + https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike""" # noqa: E501 + ) if not (ax.is_categorical() or ax.is_interval()): warnings.warn(_missing_key_warning, FutureWarning, stacklevel=6) @@ -1379,8 +1379,8 @@ class _IXIndexer(_NDFrameIndexer): .iloc for positional indexing See the documentation here: - http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#ix-indexer-is-deprecated""" - ) # noqa + http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#ix-indexer-is-deprecated""" # noqa: E501 + ) def __init__(self, name, obj): warnings.warn(self._ix_deprecation_warning, FutureWarning, stacklevel=2) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 78440939ebc01..356934d457cc9 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1913,7 +1913,7 @@ def __init__(self, src, **kwds): else: if len(self._reader.header) > 1: # we have a multi index in the columns - self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns( + self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns( # noqa: E501 self._reader.header, self.index_names, self.col_names, passed_names ) else: @@ -2308,7 +2308,7 @@ def __init__(self, f, **kwds): # The original set is stored in self.original_columns. if len(self.columns) > 1: # we are processing a multi index column - self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns( + self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns( # noqa: E501 self.columns, self.index_names, self.col_names ) # Update list of original names to include all indices. 
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index a32103d7b29b9..a625c912d1d8e 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -684,7 +684,7 @@ def test_with_large_max_level(self): "CreatedBy.user.family_tree.father.name": "Father001", "CreatedBy.user.family_tree.father.father.Name": "Father002", "CreatedBy.user.family_tree.father.father.father.name": "Father003", - "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", + "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501 } ] output = nested_to_record(input_data, max_level=max_level) diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index 27a23180b269a..df3c7fe9c9936 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -338,9 +338,9 @@ def test_hash_collisions(): # # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726 hashes = [ - "Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa - "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe", - ] # noqa + "Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa: E501 + "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe", # noqa: E501 + ] # These should be different. 
result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8") diff --git a/setup.cfg b/setup.cfg index 7549bfe2e325d..e559ece2a759a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,7 +14,6 @@ parentdir_prefix = pandas- [flake8] max-line-length = 88 ignore = - E501, # longer line length E203, # space before : (needed for how black formats slicing) W503, # line break before binary operator W504, # line break after binary operator
I think I made a small mistake in https://github.com/pandas-dev/pandas/pull/27076 by adding the E501 code (line length) to the ignore list; setting the flake8 max line length to 88 should have been sufficient. Black does not check or correct line length in comments or multiline strings (e.g. docstrings), so this still needs to be caught by flake8.
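A quick hypothetical illustration (not code from this PR): black re-wraps ordinary code lines, but it leaves docstrings and comments exactly as written, so with E501 ignored nothing would ever flag the two over-long lines below. Re-enabling E501 alongside `max-line-length = 88` lets flake8 report them.

```python
def fast_unique(values):
    """Return the sorted unique values; black will not re-wrap this docstring even though this line clearly runs past 88 characters."""
    # black leaves this over-long trailing comment untouched as well, so only flake8's E501 check can catch it.
    return sorted(set(values))
```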
https://api.github.com/repos/pandas-dev/pandas/pulls/27307
2019-07-09T13:57:21Z
2019-07-09T20:50:36Z
2019-07-09T20:50:35Z
2019-07-09T20:50:41Z
CLN: Split test_window.py
diff --git a/pandas/tests/window/__init__.py b/pandas/tests/window/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py new file mode 100644 index 0000000000000..7ea4be25ca2a6 --- /dev/null +++ b/pandas/tests/window/conftest.py @@ -0,0 +1,49 @@ +import pytest + + +@pytest.fixture(params=[True, False]) +def raw(request): + return request.param + + +@pytest.fixture( + params=[ + "triang", + "blackman", + "hamming", + "bartlett", + "bohman", + "blackmanharris", + "nuttall", + "barthann", + ] +) +def win_types(request): + return request.param + + +@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"]) +def win_types_special(request): + return request.param + + +@pytest.fixture( + params=["sum", "mean", "median", "max", "min", "var", "std", "kurt", "skew"] +) +def arithmetic_win_operators(request): + return request.param + + +@pytest.fixture(params=["right", "left", "both", "neither"]) +def closed(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def center(request): + return request.param + + +@pytest.fixture(params=[None, 1]) +def min_periods(request): + return request.param diff --git a/pandas/tests/window/test_dtypes.py b/pandas/tests/window/test_dtypes.py new file mode 100644 index 0000000000000..ab2915a333afd --- /dev/null +++ b/pandas/tests/window/test_dtypes.py @@ -0,0 +1,228 @@ +from itertools import product + +import numpy as np +import pytest + +from pandas import DataFrame, Series +from pandas.core.base import DataError +import pandas.util.testing as tm + +# gh-12373 : rolling functions error on float32 data +# make sure rolling functions works for different dtypes +# +# NOTE that these are yielded tests and so _create_data +# is explicitly called. 
+# +# further note that we are only checking rolling for fully dtype +# compliance (though both expanding and ewm inherit) + + +class Dtype: + window = 2 + + funcs = { + "count": lambda v: v.count(), + "max": lambda v: v.max(), + "min": lambda v: v.min(), + "sum": lambda v: v.sum(), + "mean": lambda v: v.mean(), + "std": lambda v: v.std(), + "var": lambda v: v.var(), + "median": lambda v: v.median(), + } + + def get_expects(self): + expects = { + "sr1": { + "count": Series([1, 2, 2, 2, 2], dtype="float64"), + "max": Series([np.nan, 1, 2, 3, 4], dtype="float64"), + "min": Series([np.nan, 0, 1, 2, 3], dtype="float64"), + "sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"), + "mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"), + "std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"), + "var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"), + "median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"), + }, + "sr2": { + "count": Series([1, 2, 2, 2, 2], dtype="float64"), + "max": Series([np.nan, 10, 8, 6, 4], dtype="float64"), + "min": Series([np.nan, 8, 6, 4, 2], dtype="float64"), + "sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"), + "mean": Series([np.nan, 9, 7, 5, 3], dtype="float64"), + "std": Series([np.nan] + [np.sqrt(2)] * 4, dtype="float64"), + "var": Series([np.nan, 2, 2, 2, 2], dtype="float64"), + "median": Series([np.nan, 9, 7, 5, 3], dtype="float64"), + }, + "df": { + "count": DataFrame( + {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, + dtype="float64", + ), + "max": DataFrame( + {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])}, + dtype="float64", + ), + "min": DataFrame( + {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])}, + dtype="float64", + ), + "sum": DataFrame( + { + 0: Series([np.nan, 2, 6, 10, 14]), + 1: Series([np.nan, 4, 8, 12, 16]), + }, + dtype="float64", + ), + "mean": DataFrame( + {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}, + dtype="float64", + ), + "std": DataFrame( + { + 0: Series([np.nan] + [np.sqrt(2)] * 4), + 1: Series([np.nan] + [np.sqrt(2)] * 4), + }, + dtype="float64", + ), + "var": DataFrame( + {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])}, + dtype="float64", + ), + "median": DataFrame( + {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}, + dtype="float64", + ), + }, + } + return expects + + def _create_dtype_data(self, dtype): + sr1 = Series(np.arange(5), dtype=dtype) + sr2 = Series(np.arange(10, 0, -2), dtype=dtype) + df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype) + + data = {"sr1": sr1, "sr2": sr2, "df": df} + + return data + + def _create_data(self): + self.data = self._create_dtype_data(self.dtype) + self.expects = self.get_expects() + + def test_dtypes(self): + self._create_data() + for f_name, d_name in product(self.funcs.keys(), self.data.keys()): + + f = self.funcs[f_name] + d = self.data[d_name] + exp = self.expects[d_name][f_name] + self.check_dtypes(f, f_name, d, d_name, exp) + + def check_dtypes(self, f, f_name, d, d_name, exp): + roll = d.rolling(window=self.window) + result = f(roll) + tm.assert_almost_equal(result, exp) + + +class TestDtype_object(Dtype): + dtype = object + + +class Dtype_integer(Dtype): + pass + + +class TestDtype_int8(Dtype_integer): + dtype = np.int8 + + +class TestDtype_int16(Dtype_integer): + dtype = np.int16 + + +class TestDtype_int32(Dtype_integer): + dtype = np.int32 + + +class TestDtype_int64(Dtype_integer): + dtype = np.int64 + + +class Dtype_uinteger(Dtype): + 
pass + + +class TestDtype_uint8(Dtype_uinteger): + dtype = np.uint8 + + +class TestDtype_uint16(Dtype_uinteger): + dtype = np.uint16 + + +class TestDtype_uint32(Dtype_uinteger): + dtype = np.uint32 + + +class TestDtype_uint64(Dtype_uinteger): + dtype = np.uint64 + + +class Dtype_float(Dtype): + pass + + +class TestDtype_float16(Dtype_float): + dtype = np.float16 + + +class TestDtype_float32(Dtype_float): + dtype = np.float32 + + +class TestDtype_float64(Dtype_float): + dtype = np.float64 + + +class TestDtype_category(Dtype): + dtype = "category" + include_df = False + + def _create_dtype_data(self, dtype): + sr1 = Series(range(5), dtype=dtype) + sr2 = Series(range(10, 0, -2), dtype=dtype) + + data = {"sr1": sr1, "sr2": sr2} + + return data + + +class DatetimeLike(Dtype): + def check_dtypes(self, f, f_name, d, d_name, exp): + + roll = d.rolling(window=self.window) + if f_name == "count": + result = f(roll) + tm.assert_almost_equal(result, exp) + + else: + with pytest.raises(DataError): + f(roll) + + +class TestDtype_timedelta(DatetimeLike): + dtype = np.dtype("m8[ns]") + + +class TestDtype_datetime(DatetimeLike): + dtype = np.dtype("M8[ns]") + + +class TestDtype_datetime64UTC(DatetimeLike): + dtype = "datetime64[ns, UTC]" + + def _create_data(self): + pytest.skip( + "direct creation of extension dtype " + "datetime64[ns, UTC] is not supported ATM" + ) diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py new file mode 100644 index 0000000000000..56d89e15c418c --- /dev/null +++ b/pandas/tests/window/test_pairwise.py @@ -0,0 +1,183 @@ +import warnings + +import pytest + +from pandas import DataFrame, Series +from pandas.core.sorting import safe_sort +import pandas.util.testing as tm + + +class TestPairwise: + + # GH 7738 + df1s = [ + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", "C"]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1.0, 0]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0.0, 1]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", 1]), + DataFrame([[2.0, 4.0], [1.0, 2.0], [5.0, 2.0], [8.0, 1.0]], columns=[1, 0.0]), + DataFrame([[2, 4.0], [1, 2.0], [5, 2.0], [8, 1.0]], columns=[0, 1.0]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.0]], columns=[1.0, "X"]), + ] + df2 = DataFrame( + [[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]], + columns=["Y", "Z", "X"], + ) + s = Series([1, 1, 3, 8]) + + def compare(self, result, expected): + + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()]) + def test_no_flex(self, f): + + # DataFrame methods (which do not call _flex_binary_moment()) + + results = [f(df) for df in self.df1s] + for (df, result) in zip(self.df1s, results): + tm.assert_index_equal(result.index, df.columns) + tm.assert_index_equal(result.columns, df.columns) + for i, result in enumerate(results): + if i > 0: + self.compare(result, results[0]) + + @pytest.mark.parametrize( + "f", + [ + lambda x: x.expanding().cov(pairwise=True), + lambda x: x.expanding().corr(pairwise=True), + lambda x: x.rolling(window=3).cov(pairwise=True), + lambda x: x.rolling(window=3).corr(pairwise=True), + 
lambda x: x.ewm(com=3).cov(pairwise=True), + lambda x: x.ewm(com=3).corr(pairwise=True), + ], + ) + def test_pairwise_with_self(self, f): + + # DataFrame with itself, pairwise=True + # note that we may construct the 1st level of the MI + # in a non-monotonic way, so compare accordingly + results = [] + for i, df in enumerate(self.df1s): + result = f(df) + tm.assert_index_equal(result.index.levels[0], df.index, check_names=False) + tm.assert_numpy_array_equal( + safe_sort(result.index.levels[1]), safe_sort(df.columns.unique()) + ) + tm.assert_index_equal(result.columns, df.columns) + results.append(df) + + for i, result in enumerate(results): + if i > 0: + self.compare(result, results[0]) + + @pytest.mark.parametrize( + "f", + [ + lambda x: x.expanding().cov(pairwise=False), + lambda x: x.expanding().corr(pairwise=False), + lambda x: x.rolling(window=3).cov(pairwise=False), + lambda x: x.rolling(window=3).corr(pairwise=False), + lambda x: x.ewm(com=3).cov(pairwise=False), + lambda x: x.ewm(com=3).corr(pairwise=False), + ], + ) + def test_no_pairwise_with_self(self, f): + + # DataFrame with itself, pairwise=False + results = [f(df) for df in self.df1s] + for (df, result) in zip(self.df1s, results): + tm.assert_index_equal(result.index, df.index) + tm.assert_index_equal(result.columns, df.columns) + for i, result in enumerate(results): + if i > 0: + self.compare(result, results[0]) + + @pytest.mark.parametrize( + "f", + [ + lambda x, y: x.expanding().cov(y, pairwise=True), + lambda x, y: x.expanding().corr(y, pairwise=True), + lambda x, y: x.rolling(window=3).cov(y, pairwise=True), + lambda x, y: x.rolling(window=3).corr(y, pairwise=True), + lambda x, y: x.ewm(com=3).cov(y, pairwise=True), + lambda x, y: x.ewm(com=3).corr(y, pairwise=True), + ], + ) + def test_pairwise_with_other(self, f): + + # DataFrame with another DataFrame, pairwise=True + results = [f(df, self.df2) for df in self.df1s] + for (df, result) in zip(self.df1s, results): + tm.assert_index_equal(result.index.levels[0], df.index, check_names=False) + tm.assert_numpy_array_equal( + safe_sort(result.index.levels[1]), safe_sort(self.df2.columns.unique()) + ) + for i, result in enumerate(results): + if i > 0: + self.compare(result, results[0]) + + @pytest.mark.parametrize( + "f", + [ + lambda x, y: x.expanding().cov(y, pairwise=False), + lambda x, y: x.expanding().corr(y, pairwise=False), + lambda x, y: x.rolling(window=3).cov(y, pairwise=False), + lambda x, y: x.rolling(window=3).corr(y, pairwise=False), + lambda x, y: x.ewm(com=3).cov(y, pairwise=False), + lambda x, y: x.ewm(com=3).corr(y, pairwise=False), + ], + ) + def test_no_pairwise_with_other(self, f): + + # DataFrame with another DataFrame, pairwise=False + results = [ + f(df, self.df2) if df.columns.is_unique else None for df in self.df1s + ] + for (df, result) in zip(self.df1s, results): + if result is not None: + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + # we can have int and str columns + expected_index = df.index.union(self.df2.index) + expected_columns = df.columns.union(self.df2.columns) + tm.assert_index_equal(result.index, expected_index) + tm.assert_index_equal(result.columns, expected_columns) + else: + with pytest.raises(ValueError, match="'arg1' columns are not unique"): + f(df, self.df2) + with pytest.raises(ValueError, match="'arg2' columns are not unique"): + f(self.df2, df) + + @pytest.mark.parametrize( + "f", + [ + lambda x, y: x.expanding().cov(y), + lambda x, y: x.expanding().corr(y), + lambda x, y: 
x.rolling(window=3).cov(y), + lambda x, y: x.rolling(window=3).corr(y), + lambda x, y: x.ewm(com=3).cov(y), + lambda x, y: x.ewm(com=3).corr(y), + ], + ) + def test_pairwise_with_series(self, f): + + # DataFrame with a Series + results = [f(df, self.s) for df in self.df1s] + [ + f(self.s, df) for df in self.df1s + ] + for (df, result) in zip(self.df1s, results): + tm.assert_index_equal(result.index, df.index) + tm.assert_index_equal(result.columns, df.columns) + for i, result in enumerate(results): + if i > 0: + self.compare(result, results[0]) diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py new file mode 100644 index 0000000000000..e057eadae9da8 --- /dev/null +++ b/pandas/tests/window/test_timeseries_window.py @@ -0,0 +1,692 @@ +import numpy as np +import pytest + +from pandas import DataFrame, Index, Series, Timestamp, date_range, to_datetime +import pandas.util.testing as tm + +import pandas.tseries.offsets as offsets + + +class TestRollingTS: + + # rolling time-series friendly + # xref GH13327 + + def setup_method(self, method): + + self.regular = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)} + ).set_index("A") + + self.ragged = DataFrame({"B": range(5)}) + self.ragged.index = [ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ] + + def test_doc_string(self): + + df = DataFrame( + {"B": [0, 1, 2, np.nan, 4]}, + index=[ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ], + ) + df + df.rolling("2s").sum() + + def test_valid(self): + + df = self.regular + + # not a valid freq + with pytest.raises(ValueError): + df.rolling(window="foobar") + + # not a datetimelike index + with pytest.raises(ValueError): + df.reset_index().rolling(window="foobar") + + # non-fixed freqs + for freq in ["2MS", offsets.MonthBegin(2)]: + with pytest.raises(ValueError): + df.rolling(window=freq) + + for freq in ["1D", offsets.Day(2), "2ms"]: + df.rolling(window=freq) + + # non-integer min_periods + for minp in [1.0, "foo", np.array([1, 2, 3])]: + with pytest.raises(ValueError): + df.rolling(window="1D", min_periods=minp) + + # center is not implemented + with pytest.raises(NotImplementedError): + df.rolling(window="1D", center=True) + + def test_on(self): + + df = self.regular + + # not a valid column + with pytest.raises(ValueError): + df.rolling(window="2s", on="foobar") + + # column is valid + df = df.copy() + df["C"] = date_range("20130101", periods=len(df)) + df.rolling(window="2d", on="C").sum() + + # invalid columns + with pytest.raises(ValueError): + df.rolling(window="2d", on="B") + + # ok even though on non-selected + df.rolling(window="2d", on="C").B.sum() + + def test_monotonic_on(self): + + # on/index must be monotonic + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)} + ) + + assert df.A.is_monotonic + df.rolling("2s", on="A").sum() + + df = df.set_index("A") + assert df.index.is_monotonic + df.rolling("2s").sum() + + # non-monotonic + df.index = reversed(df.index.tolist()) + assert not df.index.is_monotonic + + with pytest.raises(ValueError): + df.rolling("2s").sum() + + df = df.reset_index() + with pytest.raises(ValueError): + df.rolling("2s", on="A").sum() + + def test_frame_on(self): + + df = DataFrame( + {"B": range(5), "C": 
date_range("20130101 09:00:00", periods=5, freq="3s")} + ) + + df["A"] = [ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ] + + # we are doing simulating using 'on' + expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True) + + result = df.rolling("2s", on="A").B.sum() + tm.assert_series_equal(result, expected) + + # test as a frame + # we should be ignoring the 'on' as an aggregation column + # note that the expected is setting, computing, and resetting + # so the columns need to be switched compared + # to the actual result where they are ordered as in the + # original + expected = ( + df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]] + ) + + result = df.rolling("2s", on="A")[["B"]].sum() + tm.assert_frame_equal(result, expected) + + def test_frame_on2(self): + + # using multiple aggregation columns + df = DataFrame( + { + "A": [0, 1, 2, 3, 4], + "B": [0, 1, 2, np.nan, 4], + "C": Index( + [ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ] + ), + }, + columns=["A", "C", "B"], + ) + + expected1 = DataFrame( + {"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]}, + columns=["A", "C", "B"], + ) + + result = df.rolling("2s", on="C").sum() + expected = expected1 + tm.assert_frame_equal(result, expected) + + expected = Series([0, 1, 3, np.nan, 4], name="B") + result = df.rolling("2s", on="C").B.sum() + tm.assert_series_equal(result, expected) + + expected = expected1[["A", "B", "C"]] + result = df.rolling("2s", on="C")[["A", "B", "C"]].sum() + tm.assert_frame_equal(result, expected) + + def test_basic_regular(self): + + df = self.regular.copy() + + df.index = date_range("20130101", periods=5, freq="D") + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window="1D").sum() + tm.assert_frame_equal(result, expected) + + df.index = date_range("20130101", periods=5, freq="2D") + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window="2D", min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window="2D", min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(window=1).sum() + result = df.rolling(window="2D").sum() + tm.assert_frame_equal(result, expected) + + def test_min_periods(self): + + # compare for min_periods + df = self.regular + + # these slightly different + expected = df.rolling(2, min_periods=1).sum() + result = df.rolling("2s").sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(2, min_periods=1).sum() + result = df.rolling("2s", min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + def test_closed(self): + + # xref GH13965 + + df = DataFrame( + {"A": [1] * 5}, + index=[ + Timestamp("20130101 09:00:01"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:04"), + Timestamp("20130101 09:00:06"), + ], + ) + + # closed must be 'right', 'left', 'both', 'neither' + with pytest.raises(ValueError): + self.regular.rolling(window="2s", closed="blabla") + + expected = df.copy() + expected["A"] = [1.0, 2, 2, 2, 1] + result = df.rolling("2s", closed="right").sum() + tm.assert_frame_equal(result, expected) + + # default should be 'right' + result = 
df.rolling("2s").sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [1.0, 2, 3, 3, 2] + result = df.rolling("2s", closed="both").sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 2, 2, 1] + result = df.rolling("2s", closed="left").sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 1, 1, np.nan] + result = df.rolling("2s", closed="neither").sum() + tm.assert_frame_equal(result, expected) + + def test_ragged_sum(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 3, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=2).sum() + expected = df.copy() + expected["B"] = [np.nan, np.nan, 3, np.nan, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 5, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s").sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 5, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="4s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 6, 9] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="4s", min_periods=3).sum() + expected = df.copy() + expected["B"] = [np.nan, np.nan, 3, 6, 9] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 6, 10] + tm.assert_frame_equal(result, expected) + + def test_ragged_mean(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).mean() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).mean() + expected = df.copy() + expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_median(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).median() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).median() + expected = df.copy() + expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_quantile(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).quantile(0.5) + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).quantile(0.5) + expected = df.copy() + expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_std(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).std(ddof=0) + expected = df.copy() + expected["B"] = [0.0] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="1s", min_periods=1).std(ddof=1) + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s", min_periods=1).std(ddof=0) + expected = df.copy() + expected["B"] = [0.0] + [0.5] * 4 + tm.assert_frame_equal(result, expected) 
+ + result = df.rolling(window="5s", min_periods=1).std(ddof=1) + expected = df.copy() + expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994] + tm.assert_frame_equal(result, expected) + + def test_ragged_var(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).var(ddof=0) + expected = df.copy() + expected["B"] = [0.0] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="1s", min_periods=1).var(ddof=1) + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s", min_periods=1).var(ddof=0) + expected = df.copy() + expected["B"] = [0.0] + [0.25] * 4 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).var(ddof=1) + expected = df.copy() + expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0] + tm.assert_frame_equal(result, expected) + + def test_ragged_skew(self): + + df = self.ragged + result = df.rolling(window="3s", min_periods=1).skew() + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).skew() + expected = df.copy() + expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0] + tm.assert_frame_equal(result, expected) + + def test_ragged_kurt(self): + + df = self.ragged + result = df.rolling(window="3s", min_periods=1).kurt() + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).kurt() + expected = df.copy() + expected["B"] = [np.nan] * 4 + [-1.2] + tm.assert_frame_equal(result, expected) + + def test_ragged_count(self): + + df = self.ragged + result = df.rolling(window="1s", min_periods=1).count() + expected = df.copy() + expected["B"] = [1.0, 1, 1, 1, 1] + tm.assert_frame_equal(result, expected) + + df = self.ragged + result = df.rolling(window="1s").count() + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).count() + expected = df.copy() + expected["B"] = [1.0, 1, 2, 1, 2] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=2).count() + expected = df.copy() + expected["B"] = [np.nan, np.nan, 2, np.nan, 2] + tm.assert_frame_equal(result, expected) + + def test_regular_min(self): + + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]} + ).set_index("A") + result = df.rolling("1s").min() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]} + ).set_index("A") + + tm.assert_frame_equal(result, expected) + result = df.rolling("2s").min() + expected = df.copy() + expected["B"] = [5.0, 4, 3, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling("5s").min() + expected = df.copy() + expected["B"] = [5.0, 4, 3, 3, 3] + tm.assert_frame_equal(result, expected) + + def test_ragged_min(self): + + df = self.ragged + + result = df.rolling(window="1s", min_periods=1).min() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).min() + expected = df.copy() + expected["B"] = [0.0, 1, 1, 3, 3] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).min() + expected = df.copy() + expected["B"] = [0.0, 0, 0, 1, 1] + tm.assert_frame_equal(result, expected) + + 
def test_perf_min(self): + + N = 10000 + + dfp = DataFrame( + {"B": np.random.randn(N)}, index=date_range("20130101", periods=N, freq="s") + ) + expected = dfp.rolling(2, min_periods=1).min() + result = dfp.rolling("2s").min() + assert ((result - expected) < 0.01).all().bool() + + expected = dfp.rolling(200, min_periods=1).min() + result = dfp.rolling("200s").min() + assert ((result - expected) < 0.01).all().bool() + + def test_ragged_max(self): + + df = self.ragged + + result = df.rolling(window="1s", min_periods=1).max() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).max() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).max() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + def test_ragged_apply(self, raw): + + df = self.ragged + + f = lambda x: 1 + result = df.rolling(window="1s", min_periods=1).apply(f, raw=raw) + expected = df.copy() + expected["B"] = 1.0 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).apply(f, raw=raw) + expected = df.copy() + expected["B"] = 1.0 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).apply(f, raw=raw) + expected = df.copy() + expected["B"] = 1.0 + tm.assert_frame_equal(result, expected) + + def test_all(self): + + # simple comparison of integer vs time-based windowing + df = self.regular * 2 + er = df.rolling(window=1) + r = df.rolling(window="1s") + + for f in [ + "sum", + "mean", + "count", + "median", + "std", + "var", + "kurt", + "skew", + "min", + "max", + ]: + + result = getattr(r, f)() + expected = getattr(er, f)() + tm.assert_frame_equal(result, expected) + + result = r.quantile(0.5) + expected = er.quantile(0.5) + tm.assert_frame_equal(result, expected) + + def test_all_apply(self, raw): + + df = self.regular * 2 + er = df.rolling(window=1) + r = df.rolling(window="1s") + + result = r.apply(lambda x: 1, raw=raw) + expected = er.apply(lambda x: 1, raw=raw) + tm.assert_frame_equal(result, expected) + + def test_all2(self): + + # more sophisticated comparison of integer vs. 
+ # time-based windowing + df = DataFrame( + {"B": np.arange(50)}, index=date_range("20130101", periods=50, freq="H") + ) + # in-range data + dft = df.between_time("09:00", "16:00") + + r = dft.rolling(window="5H") + + for f in [ + "sum", + "mean", + "count", + "median", + "std", + "var", + "kurt", + "skew", + "min", + "max", + ]: + + result = getattr(r, f)() + + # we need to roll the days separately + # to compare with a time-based roll + # finally groupby-apply will return a multi-index + # so we need to drop the day + def agg_by_day(x): + x = x.between_time("09:00", "16:00") + return getattr(x.rolling(5, min_periods=1), f)() + + expected = ( + df.groupby(df.index.day) + .apply(agg_by_day) + .reset_index(level=0, drop=True) + ) + + tm.assert_frame_equal(result, expected) + + def test_groupby_monotonic(self): + + # GH 15130 + # we don't need to validate monotonicity when grouping + + data = [ + ["David", "1/1/2015", 100], + ["David", "1/5/2015", 500], + ["David", "5/30/2015", 50], + ["David", "7/25/2015", 50], + ["Ryan", "1/4/2014", 100], + ["Ryan", "1/19/2015", 500], + ["Ryan", "3/31/2016", 50], + ["Joe", "7/1/2015", 100], + ["Joe", "9/9/2015", 500], + ["Joe", "10/15/2015", 50], + ] + + df = DataFrame(data=data, columns=["name", "date", "amount"]) + df["date"] = to_datetime(df["date"]) + + expected = ( + df.set_index("date") + .groupby("name") + .apply(lambda x: x.rolling("180D")["amount"].sum()) + ) + result = df.groupby("name").rolling("180D", on="date")["amount"].sum() + tm.assert_series_equal(result, expected) + + def test_non_monotonic(self): + # GH 13966 (similar to #15130, closed by #15175) + + dates = date_range(start="2016-01-01 09:30:00", periods=20, freq="s") + df = DataFrame( + { + "A": [1] * 20 + [2] * 12 + [3] * 8, + "B": np.concatenate((dates, dates)), + "C": np.arange(40), + } + ) + + result = df.groupby("A").rolling("4s", on="B").C.mean() + expected = ( + df.set_index("B").groupby("A").apply(lambda x: x.rolling("4s")["C"].mean()) + ) + tm.assert_series_equal(result, expected) + + df2 = df.sort_values("B") + result = df2.groupby("A").rolling("4s", on="B").C.mean() + tm.assert_series_equal(result, expected) + + def test_rolling_cov_offset(self): + # GH16058 + + idx = date_range("2017-01-01", periods=24, freq="1h") + ss = Series(np.arange(len(idx)), index=idx) + + result = ss.rolling("2h").cov() + expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx) + tm.assert_series_equal(result, expected) + + expected2 = ss.rolling(2, min_periods=1).cov() + tm.assert_series_equal(result, expected2) + + result = ss.rolling("3h").cov() + expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx) + tm.assert_series_equal(result, expected) + + expected2 = ss.rolling(3, min_periods=1).cov() + tm.assert_series_equal(result, expected2) diff --git a/pandas/tests/test_window.py b/pandas/tests/window/test_window.py similarity index 76% rename from pandas/tests/test_window.py rename to pandas/tests/window/test_window.py index fca88ff3ce8ce..d85e22de1d176 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/window/test_window.py @@ -1,6 +1,5 @@ from collections import OrderedDict from datetime import datetime, timedelta -from itertools import product import warnings from warnings import catch_warnings @@ -13,8 +12,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, bdate_range, concat, isna, notna -from pandas.core.base import DataError, SpecificationError -from pandas.core.sorting import safe_sort +from pandas.core.base import 
SpecificationError import pandas.core.window as rwindow import pandas.util.testing as tm @@ -23,46 +21,6 @@ N, K = 100, 10 -def assert_equal(left, right): - if isinstance(left, Series): - tm.assert_series_equal(left, right) - else: - tm.assert_frame_equal(left, right) - - -@pytest.fixture(params=[True, False]) -def raw(request): - return request.param - - -@pytest.fixture( - params=[ - "triang", - "blackman", - "hamming", - "bartlett", - "bohman", - "blackmanharris", - "nuttall", - "barthann", - ] -) -def win_types(request): - return request.param - - -@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"]) -def win_types_special(request): - return request.param - - -@pytest.fixture( - params=["sum", "mean", "median", "max", "min", "var", "std", "kurt", "skew"] -) -def arithmetic_win_operators(request): - return request.param - - class Base: _nan_locs = np.arange(20, 40) @@ -963,225 +921,6 @@ def test_numpy_compat(self, method): getattr(e, method)(dtype=np.float64) -# gh-12373 : rolling functions error on float32 data -# make sure rolling functions works for different dtypes -# -# NOTE that these are yielded tests and so _create_data -# is explicitly called. -# -# further note that we are only checking rolling for fully dtype -# compliance (though both expanding and ewm inherit) -class Dtype: - window = 2 - - funcs = { - "count": lambda v: v.count(), - "max": lambda v: v.max(), - "min": lambda v: v.min(), - "sum": lambda v: v.sum(), - "mean": lambda v: v.mean(), - "std": lambda v: v.std(), - "var": lambda v: v.var(), - "median": lambda v: v.median(), - } - - def get_expects(self): - expects = { - "sr1": { - "count": Series([1, 2, 2, 2, 2], dtype="float64"), - "max": Series([np.nan, 1, 2, 3, 4], dtype="float64"), - "min": Series([np.nan, 0, 1, 2, 3], dtype="float64"), - "sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"), - "mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"), - "std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"), - "var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"), - "median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"), - }, - "sr2": { - "count": Series([1, 2, 2, 2, 2], dtype="float64"), - "max": Series([np.nan, 10, 8, 6, 4], dtype="float64"), - "min": Series([np.nan, 8, 6, 4, 2], dtype="float64"), - "sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"), - "mean": Series([np.nan, 9, 7, 5, 3], dtype="float64"), - "std": Series([np.nan] + [np.sqrt(2)] * 4, dtype="float64"), - "var": Series([np.nan, 2, 2, 2, 2], dtype="float64"), - "median": Series([np.nan, 9, 7, 5, 3], dtype="float64"), - }, - "df": { - "count": DataFrame( - {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, - dtype="float64", - ), - "max": DataFrame( - {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])}, - dtype="float64", - ), - "min": DataFrame( - {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])}, - dtype="float64", - ), - "sum": DataFrame( - { - 0: Series([np.nan, 2, 6, 10, 14]), - 1: Series([np.nan, 4, 8, 12, 16]), - }, - dtype="float64", - ), - "mean": DataFrame( - {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}, - dtype="float64", - ), - "std": DataFrame( - { - 0: Series([np.nan] + [np.sqrt(2)] * 4), - 1: Series([np.nan] + [np.sqrt(2)] * 4), - }, - dtype="float64", - ), - "var": DataFrame( - {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])}, - dtype="float64", - ), - "median": DataFrame( - {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}, - 
dtype="float64", - ), - }, - } - return expects - - def _create_dtype_data(self, dtype): - sr1 = Series(np.arange(5), dtype=dtype) - sr2 = Series(np.arange(10, 0, -2), dtype=dtype) - df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype) - - data = {"sr1": sr1, "sr2": sr2, "df": df} - - return data - - def _create_data(self): - self.data = self._create_dtype_data(self.dtype) - self.expects = self.get_expects() - - def test_dtypes(self): - self._create_data() - for f_name, d_name in product(self.funcs.keys(), self.data.keys()): - - f = self.funcs[f_name] - d = self.data[d_name] - exp = self.expects[d_name][f_name] - self.check_dtypes(f, f_name, d, d_name, exp) - - def check_dtypes(self, f, f_name, d, d_name, exp): - roll = d.rolling(window=self.window) - result = f(roll) - tm.assert_almost_equal(result, exp) - - -class TestDtype_object(Dtype): - dtype = object - - -class Dtype_integer(Dtype): - pass - - -class TestDtype_int8(Dtype_integer): - dtype = np.int8 - - -class TestDtype_int16(Dtype_integer): - dtype = np.int16 - - -class TestDtype_int32(Dtype_integer): - dtype = np.int32 - - -class TestDtype_int64(Dtype_integer): - dtype = np.int64 - - -class Dtype_uinteger(Dtype): - pass - - -class TestDtype_uint8(Dtype_uinteger): - dtype = np.uint8 - - -class TestDtype_uint16(Dtype_uinteger): - dtype = np.uint16 - - -class TestDtype_uint32(Dtype_uinteger): - dtype = np.uint32 - - -class TestDtype_uint64(Dtype_uinteger): - dtype = np.uint64 - - -class Dtype_float(Dtype): - pass - - -class TestDtype_float16(Dtype_float): - dtype = np.float16 - - -class TestDtype_float32(Dtype_float): - dtype = np.float32 - - -class TestDtype_float64(Dtype_float): - dtype = np.float64 - - -class TestDtype_category(Dtype): - dtype = "category" - include_df = False - - def _create_dtype_data(self, dtype): - sr1 = Series(range(5), dtype=dtype) - sr2 = Series(range(10, 0, -2), dtype=dtype) - - data = {"sr1": sr1, "sr2": sr2} - - return data - - -class DatetimeLike(Dtype): - def check_dtypes(self, f, f_name, d, d_name, exp): - - roll = d.rolling(window=self.window) - if f_name == "count": - result = f(roll) - tm.assert_almost_equal(result, exp) - - else: - with pytest.raises(DataError): - f(roll) - - -class TestDtype_timedelta(DatetimeLike): - dtype = np.dtype("m8[ns]") - - -class TestDtype_datetime(DatetimeLike): - dtype = np.dtype("M8[ns]") - - -class TestDtype_datetime64UTC(DatetimeLike): - dtype = "datetime64[ns, UTC]" - - def _create_data(self): - pytest.skip( - "direct creation of extension dtype " - "datetime64[ns, UTC] is not supported ATM" - ) - - @pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") class TestMoments(Base): def setup_method(self, method): @@ -1204,17 +943,19 @@ def test_centered_axis_validation(self): with pytest.raises(ValueError): (DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean()) - def test_rolling_sum(self): - self._check_moment_func(np.nansum, name="sum", zero_min_periods_equal=False) + def test_rolling_sum(self, raw): + self._check_moment_func( + np.nansum, name="sum", zero_min_periods_equal=False, raw=raw + ) - def test_rolling_count(self): + def test_rolling_count(self, raw): counter = lambda x: np.isfinite(x).astype(float).sum() self._check_moment_func( - counter, name="count", has_min_periods=False, fill_value=0 + counter, name="count", has_min_periods=False, fill_value=0, raw=raw ) - def test_rolling_mean(self): - self._check_moment_func(np.mean, name="mean") + def test_rolling_mean(self, raw): + self._check_moment_func(np.mean, 
name="mean", raw=raw) @td.skip_if_no_scipy def test_cmov_mean(self): @@ -1679,11 +1420,11 @@ def test_cmov_window_special_linear_range(self, win_types_special): ) tm.assert_series_equal(xp, rs) - def test_rolling_median(self): - self._check_moment_func(np.median, name="median") + def test_rolling_median(self, raw): + self._check_moment_func(np.median, name="median", raw=raw) - def test_rolling_min(self): - self._check_moment_func(np.min, name="min") + def test_rolling_min(self, raw): + self._check_moment_func(np.min, name="min", raw=raw) a = pd.Series([1, 2, 3, 4, 5]) result = a.rolling(window=100, min_periods=1).min() @@ -1693,8 +1434,8 @@ def test_rolling_min(self): with pytest.raises(ValueError): pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min() - def test_rolling_max(self): - self._check_moment_func(np.max, name="max") + def test_rolling_max(self, raw): + self._check_moment_func(np.max, name="max", raw=raw) a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64) b = a.rolling(window=100, min_periods=1).max() @@ -1704,7 +1445,7 @@ def test_rolling_max(self): pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max() @pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) - def test_rolling_quantile(self, q): + def test_rolling_quantile(self, q, raw): def scoreatpercentile(a, per): values = np.sort(a, axis=0) @@ -1725,7 +1466,7 @@ def scoreatpercentile(a, per): def quantile_func(x): return scoreatpercentile(x, q) - self._check_moment_func(quantile_func, name="quantile", quantile=q) + self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw) def test_rolling_quantile_np_percentile(self): # #9413: Tests that rolling window's quantile default behavior @@ -1865,9 +1606,11 @@ def f(x): with pytest.raises(AttributeError): df.rolling(window).apply(f, raw=True) - def test_rolling_std(self): - self._check_moment_func(lambda x: np.std(x, ddof=1), name="std") - self._check_moment_func(lambda x: np.std(x, ddof=0), name="std", ddof=0) + def test_rolling_std(self, raw): + self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw) + self._check_moment_func( + lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw + ) def test_rolling_std_1obs(self): vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0]) @@ -1903,26 +1646,29 @@ def test_rolling_std_neg_sqrt(self): b = a.ewm(span=3).std() assert np.isfinite(b[2:]).all() - def test_rolling_var(self): - self._check_moment_func(lambda x: np.var(x, ddof=1), name="var") - self._check_moment_func(lambda x: np.var(x, ddof=0), name="var", ddof=0) + def test_rolling_var(self, raw): + self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw) + self._check_moment_func( + lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw + ) @td.skip_if_no_scipy - def test_rolling_skew(self): + def test_rolling_skew(self, raw): from scipy.stats import skew - self._check_moment_func(lambda x: skew(x, bias=False), name="skew") + self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw) @td.skip_if_no_scipy - def test_rolling_kurt(self): + def test_rolling_kurt(self, raw): from scipy.stats import kurtosis - self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt") + self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw) def _check_moment_func( self, static_comp, name, + raw, has_min_periods=True, has_center=True, has_time_rule=True, @@ -2339,182 +2085,6 @@ def _check_ew(self, name=None, preserve_nan=False): assert result2.dtype == np.float_ -class TestPairwise: - - # GH 7738 
- df1s = [ - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", "C"]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1.0, 0]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0.0, 1]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", 1]), - DataFrame([[2.0, 4.0], [1.0, 2.0], [5.0, 2.0], [8.0, 1.0]], columns=[1, 0.0]), - DataFrame([[2, 4.0], [1, 2.0], [5, 2.0], [8, 1.0]], columns=[0, 1.0]), - DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.0]], columns=[1.0, "X"]), - ] - df2 = DataFrame( - [[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]], - columns=["Y", "Z", "X"], - ) - s = Series([1, 1, 3, 8]) - - def compare(self, result, expected): - - # since we have sorted the results - # we can only compare non-nans - result = result.dropna().values - expected = expected.dropna().values - - tm.assert_numpy_array_equal(result, expected, check_dtype=False) - - @pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()]) - def test_no_flex(self, f): - - # DataFrame methods (which do not call _flex_binary_moment()) - - results = [f(df) for df in self.df1s] - for (df, result) in zip(self.df1s, results): - tm.assert_index_equal(result.index, df.columns) - tm.assert_index_equal(result.columns, df.columns) - for i, result in enumerate(results): - if i > 0: - self.compare(result, results[0]) - - @pytest.mark.parametrize( - "f", - [ - lambda x: x.expanding().cov(pairwise=True), - lambda x: x.expanding().corr(pairwise=True), - lambda x: x.rolling(window=3).cov(pairwise=True), - lambda x: x.rolling(window=3).corr(pairwise=True), - lambda x: x.ewm(com=3).cov(pairwise=True), - lambda x: x.ewm(com=3).corr(pairwise=True), - ], - ) - def test_pairwise_with_self(self, f): - - # DataFrame with itself, pairwise=True - # note that we may construct the 1st level of the MI - # in a non-monotonic way, so compare accordingly - results = [] - for i, df in enumerate(self.df1s): - result = f(df) - tm.assert_index_equal(result.index.levels[0], df.index, check_names=False) - tm.assert_numpy_array_equal( - safe_sort(result.index.levels[1]), safe_sort(df.columns.unique()) - ) - tm.assert_index_equal(result.columns, df.columns) - results.append(df) - - for i, result in enumerate(results): - if i > 0: - self.compare(result, results[0]) - - @pytest.mark.parametrize( - "f", - [ - lambda x: x.expanding().cov(pairwise=False), - lambda x: x.expanding().corr(pairwise=False), - lambda x: x.rolling(window=3).cov(pairwise=False), - lambda x: x.rolling(window=3).corr(pairwise=False), - lambda x: x.ewm(com=3).cov(pairwise=False), - lambda x: x.ewm(com=3).corr(pairwise=False), - ], - ) - def test_no_pairwise_with_self(self, f): - - # DataFrame with itself, pairwise=False - results = [f(df) for df in self.df1s] - for (df, result) in zip(self.df1s, results): - tm.assert_index_equal(result.index, df.index) - tm.assert_index_equal(result.columns, df.columns) - for i, result in enumerate(results): - if i > 0: - self.compare(result, results[0]) - - @pytest.mark.parametrize( - "f", - [ - lambda x, y: x.expanding().cov(y, pairwise=True), - lambda x, y: x.expanding().corr(y, pairwise=True), - lambda x, y: x.rolling(window=3).cov(y, pairwise=True), - lambda x, y: x.rolling(window=3).corr(y, pairwise=True), - lambda x, y: x.ewm(com=3).cov(y, pairwise=True), - lambda x, y: x.ewm(com=3).corr(y, pairwise=True), - ], - ) - def 
test_pairwise_with_other(self, f): - - # DataFrame with another DataFrame, pairwise=True - results = [f(df, self.df2) for df in self.df1s] - for (df, result) in zip(self.df1s, results): - tm.assert_index_equal(result.index.levels[0], df.index, check_names=False) - tm.assert_numpy_array_equal( - safe_sort(result.index.levels[1]), safe_sort(self.df2.columns.unique()) - ) - for i, result in enumerate(results): - if i > 0: - self.compare(result, results[0]) - - @pytest.mark.parametrize( - "f", - [ - lambda x, y: x.expanding().cov(y, pairwise=False), - lambda x, y: x.expanding().corr(y, pairwise=False), - lambda x, y: x.rolling(window=3).cov(y, pairwise=False), - lambda x, y: x.rolling(window=3).corr(y, pairwise=False), - lambda x, y: x.ewm(com=3).cov(y, pairwise=False), - lambda x, y: x.ewm(com=3).corr(y, pairwise=False), - ], - ) - def test_no_pairwise_with_other(self, f): - - # DataFrame with another DataFrame, pairwise=False - results = [ - f(df, self.df2) if df.columns.is_unique else None for df in self.df1s - ] - for (df, result) in zip(self.df1s, results): - if result is not None: - with catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - # we can have int and str columns - expected_index = df.index.union(self.df2.index) - expected_columns = df.columns.union(self.df2.columns) - tm.assert_index_equal(result.index, expected_index) - tm.assert_index_equal(result.columns, expected_columns) - else: - with pytest.raises(ValueError, match="'arg1' columns are not unique"): - f(df, self.df2) - with pytest.raises(ValueError, match="'arg2' columns are not unique"): - f(self.df2, df) - - @pytest.mark.parametrize( - "f", - [ - lambda x, y: x.expanding().cov(y), - lambda x, y: x.expanding().corr(y), - lambda x, y: x.rolling(window=3).cov(y), - lambda x, y: x.rolling(window=3).corr(y), - lambda x, y: x.ewm(com=3).cov(y), - lambda x, y: x.ewm(com=3).corr(y), - ], - ) - def test_pairwise_with_series(self, f): - - # DataFrame with a Series - results = [f(df, self.s) for df in self.df1s] + [ - f(self.s, df) for df in self.df1s - ] - for (df, result) in zip(self.df1s, results): - tm.assert_index_equal(result.index, df.index) - tm.assert_index_equal(result.columns, df.columns) - for i, result in enumerate(results): - if i > 0: - self.compare(result, results[0]) - - # create the data only once as we are not setting it def _create_consistency_data(): def create_series(): @@ -2741,7 +2311,7 @@ def _non_null_values(x): if mock_mean: # check that mean equals mock_mean expected = mock_mean(x) - assert_equal(mean_x, expected.astype("float64")) + tm.assert_equal(mean_x, expected.astype("float64")) # check that correlation of a series with itself is either 1 or NaN corr_x_x = corr(x, x) @@ -2755,18 +2325,18 @@ def _non_null_values(x): # check mean of constant series expected = x * np.nan expected[count_x >= max(min_periods, 1)] = exp - assert_equal(mean_x, expected) + tm.assert_equal(mean_x, expected) # check correlation of constant series with itself is NaN expected[:] = np.nan - assert_equal(corr_x_x, expected) + tm.assert_equal(corr_x_x, expected) if var_unbiased and var_biased and var_debiasing_factors: # check variance debiasing factors var_unbiased_x = var_unbiased(x) var_biased_x = var_biased(x) var_debiasing_factors_x = var_debiasing_factors(x) - assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) + tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) for (std, var, cov) in [ (std_biased, var_biased, cov_biased), @@ -2783,15 +2353,15 @@ def 
_non_null_values(x): assert not (cov_x_x < 0).any().any() # check that var(x) == cov(x, x) - assert_equal(var_x, cov_x_x) + tm.assert_equal(var_x, cov_x_x) # check that var(x) == std(x)^2 - assert_equal(var_x, std_x * std_x) + tm.assert_equal(var_x, std_x * std_x) if var is var_biased: # check that biased var(x) == mean(x^2) - mean(x)^2 mean_x2 = mean(x * x) - assert_equal(var_x, mean_x2 - (mean_x * mean_x)) + tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) if is_constant: # check that variance of constant series is identically 0 @@ -2800,7 +2370,7 @@ def _non_null_values(x): expected[count_x >= max(min_periods, 1)] = 0.0 if var is var_unbiased: expected[count_x < 2] = np.nan - assert_equal(var_x, expected) + tm.assert_equal(var_x, expected) if isinstance(x, Series): for (y, is_constant, no_nans) in self.data: @@ -2812,31 +2382,33 @@ def _non_null_values(x): # check that cor(x, y) is symmetric corr_x_y = corr(x, y) corr_y_x = corr(y, x) - assert_equal(corr_x_y, corr_y_x) + tm.assert_equal(corr_x_y, corr_y_x) if cov: # check that cov(x, y) is symmetric cov_x_y = cov(x, y) cov_y_x = cov(y, x) - assert_equal(cov_x_y, cov_y_x) + tm.assert_equal(cov_x_y, cov_y_x) # check that cov(x, y) == (var(x+y) - var(x) - # var(y)) / 2 var_x_plus_y = var(x + y) var_y = var(y) - assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y)) + tm.assert_equal( + cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y) + ) # check that corr(x, y) == cov(x, y) / (std(x) * # std(y)) std_y = std(y) - assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) + tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) if cov is cov_biased: # check that biased cov(x, y) == mean(x*y) - # mean(x)*mean(y) mean_y = mean(y) mean_x_times_y = mean(x * y) - assert_equal( + tm.assert_equal( cov_x_y, mean_x_times_y - (mean_x * mean_y) ) @@ -3026,7 +2598,7 @@ def test_expanding_consistency(self, min_periods): # GH 9422 if name in ["sum", "prod"]: - assert_equal(expanding_f_result, expanding_apply_f_result) + tm.assert_equal(expanding_f_result, expanding_apply_f_result) @pytest.mark.slow @pytest.mark.parametrize( @@ -3147,7 +2719,7 @@ def test_rolling_consistency(self, window, min_periods, center): # GH 9422 if name in ["sum", "prod"]: - assert_equal(rolling_f_result, rolling_apply_f_result) + tm.assert_equal(rolling_f_result, rolling_apply_f_result) # binary moments def test_rolling_cov(self): @@ -4057,695 +3629,3 @@ def test_expanding_apply(self, raw): result = r.apply(lambda x: x.sum(), raw=raw) expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) - - -class TestRollingTS: - - # rolling time-series friendly - # xref GH13327 - - def setup_method(self, method): - - self.regular = DataFrame( - {"A": pd.date_range("20130101", periods=5, freq="s"), "B": range(5)} - ).set_index("A") - - self.ragged = DataFrame({"B": range(5)}) - self.ragged.index = [ - Timestamp("20130101 09:00:00"), - Timestamp("20130101 09:00:02"), - Timestamp("20130101 09:00:03"), - Timestamp("20130101 09:00:05"), - Timestamp("20130101 09:00:06"), - ] - - def test_doc_string(self): - - df = DataFrame( - {"B": [0, 1, 2, np.nan, 4]}, - index=[ - Timestamp("20130101 09:00:00"), - Timestamp("20130101 09:00:02"), - Timestamp("20130101 09:00:03"), - Timestamp("20130101 09:00:05"), - Timestamp("20130101 09:00:06"), - ], - ) - df - df.rolling("2s").sum() - - def test_valid(self): - - df = self.regular - - # not a valid freq - with pytest.raises(ValueError): - df.rolling(window="foobar") - - # not a datetimelike index - with 
pytest.raises(ValueError): - df.reset_index().rolling(window="foobar") - - # non-fixed freqs - for freq in ["2MS", pd.offsets.MonthBegin(2)]: - with pytest.raises(ValueError): - df.rolling(window=freq) - - for freq in ["1D", pd.offsets.Day(2), "2ms"]: - df.rolling(window=freq) - - # non-integer min_periods - for minp in [1.0, "foo", np.array([1, 2, 3])]: - with pytest.raises(ValueError): - df.rolling(window="1D", min_periods=minp) - - # center is not implemented - with pytest.raises(NotImplementedError): - df.rolling(window="1D", center=True) - - def test_on(self): - - df = self.regular - - # not a valid column - with pytest.raises(ValueError): - df.rolling(window="2s", on="foobar") - - # column is valid - df = df.copy() - df["C"] = pd.date_range("20130101", periods=len(df)) - df.rolling(window="2d", on="C").sum() - - # invalid columns - with pytest.raises(ValueError): - df.rolling(window="2d", on="B") - - # ok even though on non-selected - df.rolling(window="2d", on="C").B.sum() - - def test_monotonic_on(self): - - # on/index must be monotonic - df = DataFrame( - {"A": pd.date_range("20130101", periods=5, freq="s"), "B": range(5)} - ) - - assert df.A.is_monotonic - df.rolling("2s", on="A").sum() - - df = df.set_index("A") - assert df.index.is_monotonic - df.rolling("2s").sum() - - # non-monotonic - df.index = reversed(df.index.tolist()) - assert not df.index.is_monotonic - - with pytest.raises(ValueError): - df.rolling("2s").sum() - - df = df.reset_index() - with pytest.raises(ValueError): - df.rolling("2s", on="A").sum() - - def test_frame_on(self): - - df = DataFrame( - { - "B": range(5), - "C": pd.date_range("20130101 09:00:00", periods=5, freq="3s"), - } - ) - - df["A"] = [ - Timestamp("20130101 09:00:00"), - Timestamp("20130101 09:00:02"), - Timestamp("20130101 09:00:03"), - Timestamp("20130101 09:00:05"), - Timestamp("20130101 09:00:06"), - ] - - # we are doing simulating using 'on' - expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True) - - result = df.rolling("2s", on="A").B.sum() - tm.assert_series_equal(result, expected) - - # test as a frame - # we should be ignoring the 'on' as an aggregation column - # note that the expected is setting, computing, and resetting - # so the columns need to be switched compared - # to the actual result where they are ordered as in the - # original - expected = ( - df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]] - ) - - result = df.rolling("2s", on="A")[["B"]].sum() - tm.assert_frame_equal(result, expected) - - def test_frame_on2(self): - - # using multiple aggregation columns - df = DataFrame( - { - "A": [0, 1, 2, 3, 4], - "B": [0, 1, 2, np.nan, 4], - "C": Index( - [ - Timestamp("20130101 09:00:00"), - Timestamp("20130101 09:00:02"), - Timestamp("20130101 09:00:03"), - Timestamp("20130101 09:00:05"), - Timestamp("20130101 09:00:06"), - ] - ), - }, - columns=["A", "C", "B"], - ) - - expected1 = DataFrame( - {"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]}, - columns=["A", "C", "B"], - ) - - result = df.rolling("2s", on="C").sum() - expected = expected1 - tm.assert_frame_equal(result, expected) - - expected = Series([0, 1, 3, np.nan, 4], name="B") - result = df.rolling("2s", on="C").B.sum() - tm.assert_series_equal(result, expected) - - expected = expected1[["A", "B", "C"]] - result = df.rolling("2s", on="C")[["A", "B", "C"]].sum() - tm.assert_frame_equal(result, expected) - - def test_basic_regular(self): - - df = self.regular.copy() - - df.index = pd.date_range("20130101", periods=5, 
freq="D") - expected = df.rolling(window=1, min_periods=1).sum() - result = df.rolling(window="1D").sum() - tm.assert_frame_equal(result, expected) - - df.index = pd.date_range("20130101", periods=5, freq="2D") - expected = df.rolling(window=1, min_periods=1).sum() - result = df.rolling(window="2D", min_periods=1).sum() - tm.assert_frame_equal(result, expected) - - expected = df.rolling(window=1, min_periods=1).sum() - result = df.rolling(window="2D", min_periods=1).sum() - tm.assert_frame_equal(result, expected) - - expected = df.rolling(window=1).sum() - result = df.rolling(window="2D").sum() - tm.assert_frame_equal(result, expected) - - def test_min_periods(self): - - # compare for min_periods - df = self.regular - - # these slightly different - expected = df.rolling(2, min_periods=1).sum() - result = df.rolling("2s").sum() - tm.assert_frame_equal(result, expected) - - expected = df.rolling(2, min_periods=1).sum() - result = df.rolling("2s", min_periods=1).sum() - tm.assert_frame_equal(result, expected) - - def test_closed(self): - - # xref GH13965 - - df = DataFrame( - {"A": [1] * 5}, - index=[ - Timestamp("20130101 09:00:01"), - Timestamp("20130101 09:00:02"), - Timestamp("20130101 09:00:03"), - Timestamp("20130101 09:00:04"), - Timestamp("20130101 09:00:06"), - ], - ) - - # closed must be 'right', 'left', 'both', 'neither' - with pytest.raises(ValueError): - self.regular.rolling(window="2s", closed="blabla") - - expected = df.copy() - expected["A"] = [1.0, 2, 2, 2, 1] - result = df.rolling("2s", closed="right").sum() - tm.assert_frame_equal(result, expected) - - # default should be 'right' - result = df.rolling("2s").sum() - tm.assert_frame_equal(result, expected) - - expected = df.copy() - expected["A"] = [1.0, 2, 3, 3, 2] - result = df.rolling("2s", closed="both").sum() - tm.assert_frame_equal(result, expected) - - expected = df.copy() - expected["A"] = [np.nan, 1.0, 2, 2, 1] - result = df.rolling("2s", closed="left").sum() - tm.assert_frame_equal(result, expected) - - expected = df.copy() - expected["A"] = [np.nan, 1.0, 1, 1, np.nan] - result = df.rolling("2s", closed="neither").sum() - tm.assert_frame_equal(result, expected) - - def test_ragged_sum(self): - - df = self.ragged - result = df.rolling(window="1s", min_periods=1).sum() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).sum() - expected = df.copy() - expected["B"] = [0.0, 1, 3, 3, 7] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=2).sum() - expected = df.copy() - expected["B"] = [np.nan, np.nan, 3, np.nan, 7] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="3s", min_periods=1).sum() - expected = df.copy() - expected["B"] = [0.0, 1, 3, 5, 7] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="3s").sum() - expected = df.copy() - expected["B"] = [0.0, 1, 3, 5, 7] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="4s", min_periods=1).sum() - expected = df.copy() - expected["B"] = [0.0, 1, 3, 6, 9] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="4s", min_periods=3).sum() - expected = df.copy() - expected["B"] = [np.nan, np.nan, 3, 6, 9] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).sum() - expected = df.copy() - expected["B"] = [0.0, 1, 3, 6, 10] - tm.assert_frame_equal(result, expected) - - def test_ragged_mean(self): - - df = self.ragged 
- result = df.rolling(window="1s", min_periods=1).mean() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).mean() - expected = df.copy() - expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] - tm.assert_frame_equal(result, expected) - - def test_ragged_median(self): - - df = self.ragged - result = df.rolling(window="1s", min_periods=1).median() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).median() - expected = df.copy() - expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] - tm.assert_frame_equal(result, expected) - - def test_ragged_quantile(self): - - df = self.ragged - result = df.rolling(window="1s", min_periods=1).quantile(0.5) - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).quantile(0.5) - expected = df.copy() - expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] - tm.assert_frame_equal(result, expected) - - def test_ragged_std(self): - - df = self.ragged - result = df.rolling(window="1s", min_periods=1).std(ddof=0) - expected = df.copy() - expected["B"] = [0.0] * 5 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="1s", min_periods=1).std(ddof=1) - expected = df.copy() - expected["B"] = [np.nan] * 5 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="3s", min_periods=1).std(ddof=0) - expected = df.copy() - expected["B"] = [0.0] + [0.5] * 4 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).std(ddof=1) - expected = df.copy() - expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994] - tm.assert_frame_equal(result, expected) - - def test_ragged_var(self): - - df = self.ragged - result = df.rolling(window="1s", min_periods=1).var(ddof=0) - expected = df.copy() - expected["B"] = [0.0] * 5 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="1s", min_periods=1).var(ddof=1) - expected = df.copy() - expected["B"] = [np.nan] * 5 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="3s", min_periods=1).var(ddof=0) - expected = df.copy() - expected["B"] = [0.0] + [0.25] * 4 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).var(ddof=1) - expected = df.copy() - expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0] - tm.assert_frame_equal(result, expected) - - def test_ragged_skew(self): - - df = self.ragged - result = df.rolling(window="3s", min_periods=1).skew() - expected = df.copy() - expected["B"] = [np.nan] * 5 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).skew() - expected = df.copy() - expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0] - tm.assert_frame_equal(result, expected) - - def test_ragged_kurt(self): - - df = self.ragged - result = df.rolling(window="3s", min_periods=1).kurt() - expected = df.copy() - expected["B"] = [np.nan] * 5 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).kurt() - expected = df.copy() - expected["B"] = [np.nan] * 4 + [-1.2] - tm.assert_frame_equal(result, expected) - - def test_ragged_count(self): - - df = self.ragged - result = df.rolling(window="1s", min_periods=1).count() - expected = df.copy() - expected["B"] = [1.0, 1, 1, 1, 1] - tm.assert_frame_equal(result, expected) - - df = self.ragged - result = 
df.rolling(window="1s").count() - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).count() - expected = df.copy() - expected["B"] = [1.0, 1, 2, 1, 2] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=2).count() - expected = df.copy() - expected["B"] = [np.nan, np.nan, 2, np.nan, 2] - tm.assert_frame_equal(result, expected) - - def test_regular_min(self): - - df = DataFrame( - { - "A": pd.date_range("20130101", periods=5, freq="s"), - "B": [0.0, 1, 2, 3, 4], - } - ).set_index("A") - result = df.rolling("1s").min() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - df = DataFrame( - {"A": pd.date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]} - ).set_index("A") - - tm.assert_frame_equal(result, expected) - result = df.rolling("2s").min() - expected = df.copy() - expected["B"] = [5.0, 4, 3, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling("5s").min() - expected = df.copy() - expected["B"] = [5.0, 4, 3, 3, 3] - tm.assert_frame_equal(result, expected) - - def test_ragged_min(self): - - df = self.ragged - - result = df.rolling(window="1s", min_periods=1).min() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).min() - expected = df.copy() - expected["B"] = [0.0, 1, 1, 3, 3] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).min() - expected = df.copy() - expected["B"] = [0.0, 0, 0, 1, 1] - tm.assert_frame_equal(result, expected) - - def test_perf_min(self): - - N = 10000 - - dfp = DataFrame( - {"B": np.random.randn(N)}, - index=pd.date_range("20130101", periods=N, freq="s"), - ) - expected = dfp.rolling(2, min_periods=1).min() - result = dfp.rolling("2s").min() - assert ((result - expected) < 0.01).all().bool() - - expected = dfp.rolling(200, min_periods=1).min() - result = dfp.rolling("200s").min() - assert ((result - expected) < 0.01).all().bool() - - def test_ragged_max(self): - - df = self.ragged - - result = df.rolling(window="1s", min_periods=1).max() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).max() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).max() - expected = df.copy() - expected["B"] = [0.0, 1, 2, 3, 4] - tm.assert_frame_equal(result, expected) - - def test_ragged_apply(self, raw): - - df = self.ragged - - f = lambda x: 1 - result = df.rolling(window="1s", min_periods=1).apply(f, raw=raw) - expected = df.copy() - expected["B"] = 1.0 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="2s", min_periods=1).apply(f, raw=raw) - expected = df.copy() - expected["B"] = 1.0 - tm.assert_frame_equal(result, expected) - - result = df.rolling(window="5s", min_periods=1).apply(f, raw=raw) - expected = df.copy() - expected["B"] = 1.0 - tm.assert_frame_equal(result, expected) - - def test_all(self): - - # simple comparison of integer vs time-based windowing - df = self.regular * 2 - er = df.rolling(window=1) - r = df.rolling(window="1s") - - for f in [ - "sum", - "mean", - "count", - "median", - "std", - "var", - "kurt", - "skew", - "min", - "max", - ]: - - result = getattr(r, f)() - expected = getattr(er, f)() - tm.assert_frame_equal(result, 
expected) - - result = r.quantile(0.5) - expected = er.quantile(0.5) - tm.assert_frame_equal(result, expected) - - def test_all_apply(self, raw): - - df = self.regular * 2 - er = df.rolling(window=1) - r = df.rolling(window="1s") - - result = r.apply(lambda x: 1, raw=raw) - expected = er.apply(lambda x: 1, raw=raw) - tm.assert_frame_equal(result, expected) - - def test_all2(self): - - # more sophisticated comparison of integer vs. - # time-based windowing - df = DataFrame( - {"B": np.arange(50)}, index=pd.date_range("20130101", periods=50, freq="H") - ) - # in-range data - dft = df.between_time("09:00", "16:00") - - r = dft.rolling(window="5H") - - for f in [ - "sum", - "mean", - "count", - "median", - "std", - "var", - "kurt", - "skew", - "min", - "max", - ]: - - result = getattr(r, f)() - - # we need to roll the days separately - # to compare with a time-based roll - # finally groupby-apply will return a multi-index - # so we need to drop the day - def agg_by_day(x): - x = x.between_time("09:00", "16:00") - return getattr(x.rolling(5, min_periods=1), f)() - - expected = ( - df.groupby(df.index.day) - .apply(agg_by_day) - .reset_index(level=0, drop=True) - ) - - tm.assert_frame_equal(result, expected) - - def test_groupby_monotonic(self): - - # GH 15130 - # we don't need to validate monotonicity when grouping - - data = [ - ["David", "1/1/2015", 100], - ["David", "1/5/2015", 500], - ["David", "5/30/2015", 50], - ["David", "7/25/2015", 50], - ["Ryan", "1/4/2014", 100], - ["Ryan", "1/19/2015", 500], - ["Ryan", "3/31/2016", 50], - ["Joe", "7/1/2015", 100], - ["Joe", "9/9/2015", 500], - ["Joe", "10/15/2015", 50], - ] - - df = DataFrame(data=data, columns=["name", "date", "amount"]) - df["date"] = pd.to_datetime(df["date"]) - - expected = ( - df.set_index("date") - .groupby("name") - .apply(lambda x: x.rolling("180D")["amount"].sum()) - ) - result = df.groupby("name").rolling("180D", on="date")["amount"].sum() - tm.assert_series_equal(result, expected) - - def test_non_monotonic(self): - # GH 13966 (similar to #15130, closed by #15175) - - dates = pd.date_range(start="2016-01-01 09:30:00", periods=20, freq="s") - df = DataFrame( - { - "A": [1] * 20 + [2] * 12 + [3] * 8, - "B": np.concatenate((dates, dates)), - "C": np.arange(40), - } - ) - - result = df.groupby("A").rolling("4s", on="B").C.mean() - expected = ( - df.set_index("B").groupby("A").apply(lambda x: x.rolling("4s")["C"].mean()) - ) - tm.assert_series_equal(result, expected) - - df2 = df.sort_values("B") - result = df2.groupby("A").rolling("4s", on="B").C.mean() - tm.assert_series_equal(result, expected) - - def test_rolling_cov_offset(self): - # GH16058 - - idx = pd.date_range("2017-01-01", periods=24, freq="1h") - ss = Series(np.arange(len(idx)), index=idx) - - result = ss.rolling("2h").cov() - expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx) - tm.assert_series_equal(result, expected) - - expected2 = ss.rolling(2, min_periods=1).cov() - tm.assert_series_equal(result, expected2) - - result = ss.rolling("3h").cov() - expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx) - tm.assert_series_equal(result, expected) - - expected2 = ss.rolling(3, min_periods=1).cov() - tm.assert_series_equal(result, expected2)
xref #19228, #26807 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` `test_window.py` was getting fairly unwieldy. There are a couple of test classes that use a non-pytest-idiomatic `Base` class, so those test classes are kept together for now. I split the rest of the test classes into separate files where they made sense. I think the classes in `test_window.py` need further untangling before a `conftest.py` file can be made for the new directory. Open to additional quick-win optimizations. cc @jreback
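For reference, the `raw` argument threaded through the rolling/expanding tests above comes from a pytest fixture. A minimal sketch of what a shared fixture in the eventual `conftest.py` could look like (hypothetical, since this PR deliberately defers creating one):

```
# hypothetical conftest.py for the new test directory
import pytest


@pytest.fixture(params=[True, False])
def raw(request):
    # forwarded to .apply(..., raw=...) so every apply-based test runs
    # against both the raw-ndarray and the Series code paths
    return request.param
```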
https://api.github.com/repos/pandas-dev/pandas/pulls/27305
2019-07-09T07:26:32Z
2019-07-10T18:36:00Z
2019-07-10T18:36:00Z
2019-07-10T20:49:11Z
PERF: Suppress ix warnings benchmarks
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index af4741f94d294..5008b77d9fb28 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -1,3 +1,4 @@ +import warnings import string import numpy as np @@ -320,9 +321,10 @@ class Dropna: def setup(self, how, axis): self.df = DataFrame(np.random.randn(10000, 1000)) - self.df.ix[50:1000, 20:50] = np.nan - self.df.ix[2000:3000] = np.nan - self.df.ix[:, 60:70] = np.nan + with warnings.catch_warnings(record=True): + self.df.ix[50:1000, 20:50] = np.nan + self.df.ix[2000:3000] = np.nan + self.df.ix[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" @@ -340,9 +342,10 @@ class Count: def setup(self, axis): self.df = DataFrame(np.random.randn(10000, 1000)) - self.df.ix[50:1000, 20:50] = np.nan - self.df.ix[2000:3000] = np.nan - self.df.ix[:, 60:70] = np.nan + with warnings.catch_warnings(record=True): + self.df.ix[50:1000, 20:50] = np.nan + self.df.ix[2000:3000] = np.nan + self.df.ix[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" @@ -561,7 +564,8 @@ def setup(self): self.df = DataFrame(np.random.randn(10, 10000)) def time_frame_get_dtype_counts(self): - self.df.get_dtype_counts() + with warnings.catch_warnings(record=True): + self.df.get_dtype_counts() def time_info(self): self.df.info() diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 489e5c4cd63ea..e8368f269d08a 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -66,16 +66,20 @@ def time_iloc_slice(self, index, index_structure): self.data.iloc[:800000] def time_ix_array(self, index, index_structure): - self.data.ix[self.array] + with warnings.catch_warnings(record=True): + self.data.ix[self.array] def time_ix_list_like(self, index, index_structure): - self.data.ix[[800000]] + with warnings.catch_warnings(record=True): + self.data.ix[[800000]] def time_ix_scalar(self, index, index_structure): - self.data.ix[800000] + with warnings.catch_warnings(record=True): + self.data.ix[800000] def time_ix_slice(self, index, index_structure): - self.data.ix[:800000] + with warnings.catch_warnings(record=True): + self.data.ix[:800000] def time_loc_array(self, index, index_structure): self.data.loc[self.array] @@ -131,7 +135,8 @@ class DataFrameStringIndexing: def setup(self): index = tm.makeStringIndex(1000) columns = tm.makeStringIndex(30) - self.df = DataFrame(np.random.randn(1000, 30), index=index, columns=columns) + with warnings.catch_warnings(record=True): + self.df = DataFrame(np.random.randn(1000, 30), index=index, columns=columns) self.idx_scalar = index[100] self.col_scalar = columns[10] self.bool_indexer = self.df[self.col_scalar] > 0 @@ -142,7 +147,8 @@ def time_get_value(self): self.df.get_value(self.idx_scalar, self.col_scalar) def time_ix(self): - self.df.ix[self.idx_scalar, self.col_scalar] + with warnings.catch_warnings(record=True): + self.df.ix[self.idx_scalar, self.col_scalar] def time_loc(self): self.df.loc[self.idx_scalar, self.col_scalar] @@ -206,24 +212,27 @@ def setup(self): self.df = DataFrame(self.s) n = 100000 - self.mdt = DataFrame( - { - "A": np.random.choice(range(10000, 45000, 1000), n), - "B": np.random.choice(range(10, 400), n), - "C": np.random.choice(range(1, 150), n), - "D": np.random.choice(range(10000, 45000), n), - "x": np.random.choice(range(400), n), - "y": np.random.choice(range(25), n), - } - ) + with warnings.catch_warnings(record=True): + self.mdt = 
DataFrame( + { + "A": np.random.choice(range(10000, 45000, 1000), n), + "B": np.random.choice(range(10, 400), n), + "C": np.random.choice(range(1, 150), n), + "D": np.random.choice(range(10000, 45000), n), + "x": np.random.choice(range(400), n), + "y": np.random.choice(range(25), n), + } + ) self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000] self.mdt = self.mdt.set_index(["A", "B", "C", "D"]).sort_index() def time_series_ix(self): - self.s.ix[999] + with warnings.catch_warnings(record=True): + self.s.ix[999] def time_frame_ix(self): - self.df.ix[999] + with warnings.catch_warnings(record=True): + self.df.ix[999] def time_index_slice(self): self.mdt.loc[self.idx, :] @@ -300,7 +309,8 @@ def time_lookup_iloc(self, s): s.iloc def time_lookup_ix(self, s): - s.ix + with warnings.catch_warnings(record=True): + s.ix def time_lookup_loc(self, s): s.loc diff --git a/asv_bench/benchmarks/io/msgpack.py b/asv_bench/benchmarks/io/msgpack.py index c43df7c2e91ed..d97b4ae13f0bd 100644 --- a/asv_bench/benchmarks/io/msgpack.py +++ b/asv_bench/benchmarks/io/msgpack.py @@ -1,3 +1,4 @@ +import warnings import numpy as np from pandas import DataFrame, date_range, read_msgpack import pandas.util.testing as tm @@ -16,7 +17,8 @@ def setup(self): index=date_range("20000101", periods=N, freq="H"), ) self.df["object"] = tm.makeStringIndex(N) - self.df.to_msgpack(self.fname) + with warnings.catch_warnings(record=True): + self.df.to_msgpack(self.fname) def time_read_msgpack(self): read_msgpack(self.fname) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4e9f74162ae78..37104f1cb8f67 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5603,6 +5603,7 @@ def get_dtype_counts(self): FutureWarning, stacklevel=2, ) + from pandas import Series return Series(self._data.get_dtype_counts())
- [x] closes #27217 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ~~TODO: add tests for suppressed warnings.~~
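The suppression pattern used throughout the diff is plain `warnings.catch_warnings`; a minimal standalone sketch (assuming a pandas version where `.ix` still exists and only warns):

```
import warnings

import pandas as pd

df = pd.DataFrame({"A": range(5)})
with warnings.catch_warnings(record=True):
    # .ix emits a FutureWarning; record=True captures it so the
    # benchmark timings measure indexing, not warning handling
    df.ix[0]
```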
https://api.github.com/repos/pandas-dev/pandas/pulls/27304
2019-07-09T03:43:17Z
2019-07-09T21:00:12Z
2019-07-09T21:00:12Z
2019-07-09T21:00:19Z
BUG: appending a Timedelta to Series incorrectly casts to integer
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index daca08d69346d..237d2fec82520 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1050,6 +1050,7 @@ Indexing - Bug in :class:`Categorical` and :class:`CategoricalIndex` with :class:`Interval` values when using the ``in`` operator (``__contains``) with objects that are not comparable to the values in the ``Interval`` (:issue:`23705`) - Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` on a :class:`DataFrame` with a single timezone-aware datetime64[ns] column incorrectly returning a scalar instead of a :class:`Series` (:issue:`27110`) - Bug in :class:`CategoricalIndex` and :class:`Categorical` incorrectly raising ``ValueError`` instead of ``TypeError`` when a list is passed using the ``in`` operator (``__contains__``) (:issue:`21729`) +- Bug in setting a new value in a :class:`Series` with a :class:`Timedelta` object incorrectly casting the value to an integer (:issue:`22717`) - Missing diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 1f25be8b9e31e..c31d6538ad2c3 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -20,6 +20,7 @@ is_sequence, is_sparse, ) +from pandas.core.dtypes.concat import _concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import _infer_fill_value, isna @@ -432,11 +433,9 @@ def _setitem_with_indexer(self, indexer, value): # this preserves dtype of the value new_values = Series([value])._values if len(self.obj._values): - try: - new_values = np.concatenate([self.obj._values, new_values]) - except TypeError: - as_obj = self.obj.astype(object) - new_values = np.concatenate([as_obj, new_values]) + # GH#22717 handle casting compatibility that np.concatenate + # does incorrectly + new_values = _concat_compat([self.obj._values, new_values]) self.obj._data = self.obj._constructor( new_values, index=new_index, name=self.obj.name )._data diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index c8342c54e9b5d..1fb1dd3bb998a 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -654,6 +654,29 @@ def test_timedelta_assignment(): tm.assert_series_equal(s, expected) +@pytest.mark.parametrize( + "td", + [ + pd.Timedelta("9 days"), + pd.Timedelta("9 days").to_timedelta64(), + pd.Timedelta("9 days").to_pytimedelta(), + ], +) +def test_append_timedelta_does_not_cast(td): + # GH#22717 inserting a Timedelta should _not_ cast to int64 + expected = pd.Series(["x", td], index=[0, "td"], dtype=object) + + ser = pd.Series(["x"]) + ser["td"] = td + tm.assert_series_equal(ser, expected) + assert isinstance(ser["td"], pd.Timedelta) + + ser = pd.Series(["x"]) + ser.loc["td"] = pd.Timedelta("9 days") + tm.assert_series_equal(ser, expected) + assert isinstance(ser["td"], pd.Timedelta) + + def test_underlying_data_conversion(): # GH 4080 df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})
- [x] closes #22717 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
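A minimal reproduction mirroring the new test, showing the behavior this change fixes:

```
import pandas as pd

ser = pd.Series(["x"])
ser["td"] = pd.Timedelta("9 days")
# previously the enlargement path used np.concatenate, which cast the
# Timedelta to its int64 nanosecond count; _concat_compat preserves it
assert isinstance(ser["td"], pd.Timedelta)
```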
https://api.github.com/repos/pandas-dev/pandas/pulls/27303
2019-07-09T02:26:49Z
2019-07-10T18:37:48Z
2019-07-10T18:37:48Z
2019-07-10T22:57:02Z
check early for non-scalar default_fill_value
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f195e4b5f4e37..60060a4a2d1fa 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -6,6 +6,7 @@ import numpy as np +from pandas._libs.lib import is_scalar, item_from_zerodim from pandas._libs.sparse import BlockIndex, get_blocks from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender @@ -74,6 +75,8 @@ def __init__( dtype=None, copy=False, ): + if not is_scalar(default_fill_value): + raise ValueError("'default_fill_value' must be a scalar") warnings.warn(depr_msg, FutureWarning, stacklevel=2) # pick up the defaults from the Sparse structures @@ -666,7 +669,7 @@ def _get_op_result_fill_value(self, other, func): fill_value = np.nan else: fill_value = func(np.float64(own_default), np.float64(other.fill_value)) - + fill_value = item_from_zerodim(fill_value) else: raise NotImplementedError(type(other)) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 6527d41eac841..61ed9c128cc5e 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -136,6 +136,12 @@ def test_constructor(self, float_frame, float_frame_int_kind, float_frame_fill0) repr(float_frame) + def test_constructor_fill_value_not_scalar_raises(self): + d = {"b": [2, 3], "a": [0, 1]} + fill_value = np.array(np.nan) + with pytest.raises(ValueError, match="must be a scalar"): + SparseDataFrame(data=d, default_fill_value=fill_value) + def test_constructor_dict_order(self): # GH19018 # initialization ordering: by insertion order if python>= 3.6, else
Broken off from an in-progress division-by-zero branch.
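A sketch of the new early check, mirroring the added test (note that `SparseDataFrame` itself is deprecated, so constructing one also emits a `FutureWarning`):

```
import numpy as np

import pandas as pd

try:
    pd.SparseDataFrame({"b": [2, 3], "a": [0, 1]},
                       default_fill_value=np.array(np.nan))
except ValueError as err:
    print(err)  # 'default_fill_value' must be a scalar
```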
https://api.github.com/repos/pandas-dev/pandas/pulls/27302
2019-07-09T01:23:29Z
2019-07-10T16:55:55Z
2019-07-10T16:55:55Z
2019-07-10T17:12:37Z
CLN: assorted cleanups
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index dc4e6e85f6e7d..ea61467080291 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -12,9 +12,10 @@ from pandas._config import get_option +from pandas._libs.lib import values_from_object + from pandas.core.dtypes.generic import ABCDataFrame -import pandas.core.common as com from pandas.core.computation.check import _NUMEXPR_INSTALLED if _NUMEXPR_INSTALLED: @@ -129,9 +130,7 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, reversed=False, **eval_kwa def _where_standard(cond, a, b): return np.where( - com.values_from_object(cond), - com.values_from_object(a), - com.values_from_object(b), + values_from_object(cond), values_from_object(a), values_from_object(b) ) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bf6ebf1abe760..5785dbfbd6cac 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -397,10 +397,6 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): raise ValueError("Limit must be an integer") if limit < 1: raise ValueError("Limit must be greater than 0") - if self.ndim > 2: - raise NotImplementedError( - "number of dimensions for 'fillna' is currently limited to 2" - ) mask[mask.cumsum(self.ndim - 1) > limit] = False if not self._can_hold_na: @@ -853,6 +849,8 @@ def setitem(self, indexer, value): `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ + transpose = self.ndim == 2 + # coerce None values, if appropriate if value is None: if self.is_numeric: @@ -901,8 +899,8 @@ def setitem(self, indexer, value): dtype, _ = maybe_promote(arr_value.dtype) values = values.astype(dtype) - transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) - values = transf(values) + if transpose: + values = values.T # length checking check_setitem_lengths(indexer, value, values) @@ -961,7 +959,9 @@ def _is_empty_indexer(indexer): # coerce and try to infer the dtypes of the result values = self._try_coerce_and_cast_result(values, dtype) - block = self.make_block(transf(values)) + if transpose: + values = values.T + block = self.make_block(values) return block def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b3c74aaaa5701..cd678a235cfc1 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -583,8 +583,9 @@ def astype(self, dtype, **kwargs): def convert(self, **kwargs): return self.apply("convert", **kwargs) - def replace(self, **kwargs): - return self.apply("replace", **kwargs) + def replace(self, value, **kwargs): + assert np.ndim(value) == 0, value + return self.apply("replace", value=value, **kwargs) def replace_list(self, src_list, dest_list, inplace=False, regex=False): """ do a list replace """ @@ -617,6 +618,7 @@ def comp(s, regex=False): # replace ALWAYS will return a list rb = [blk if inplace else blk.copy()] for i, (s, d) in enumerate(zip(src_list, dest_list)): + # TODO: assert/validate that `d` is always a scalar? 
new_rb = [] for b in rb: m = masks[i][b.mgr_locs.indexer] diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index f9112dbb1e4ab..d735ab3ad2535 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -11,7 +11,7 @@ import numpy as np -from pandas._libs import lib, ops as libops +from pandas._libs import Timedelta, Timestamp, lib, ops as libops from pandas.errors import NullFrequencyError from pandas.util._decorators import Appender @@ -87,7 +87,7 @@ def get_op_result_name(left, right): Usually a string """ # `left` is always a pd.Series when called from within ops - if isinstance(right, (ABCSeries, pd.Index)): + if isinstance(right, (ABCSeries, ABCIndexClass)): name = _maybe_match_name(left, right) else: name = left.name @@ -151,14 +151,14 @@ def maybe_upcast_for_op(obj): # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype # raises TypeError - return pd.Timedelta(obj) + return Timedelta(obj) elif isinstance(obj, np.timedelta64) and not isna(obj): # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') # The isna check is to avoid casting timedelta64("NaT"), which would # return NaT and incorrectly be treated as a datetime-NaT. - return pd.Timedelta(obj) + return Timedelta(obj) elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to @@ -1864,7 +1864,7 @@ def wrapper(self, other, axis=None): ) msg = "\n".join(textwrap.wrap(msg.format(future=future))) warnings.warn(msg, FutureWarning, stacklevel=2) - other = pd.Timestamp(other) + other = Timestamp(other) res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) @@ -1890,7 +1890,7 @@ def wrapper(self, other, axis=None): res_values, index=self.index, name=res_name ).rename(res_name) - elif isinstance(other, (np.ndarray, pd.Index)): + elif isinstance(other, (np.ndarray, ABCIndexClass)): # do not check length of zerodim array # as it will broadcast if other.ndim != 0 and len(self) != len(other): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 37a885e33847f..49d11f58ebe08 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -411,7 +411,7 @@ def check_single_invert_op(self, lhs, cmp1, rhs): ) def check_compound_invert_op(self, lhs, cmp1, rhs): - skip_these = "in", "not in" + skip_these = ["in", "not in"] ex = "~(lhs {0} rhs)".format(cmp1) msg = ( diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 970fd465fd4ec..9c687f036aa68 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1432,6 +1432,7 @@ def test_to_jsonl(self): assert result == expected assert_frame_equal(pd.read_json(result, lines=True), df) + # TODO: there is a near-identical test for pytables; can we share? 
def test_latin_encoding(self): # GH 13774 pytest.skip("encoding not implemented in .to_json(), xref #13774") diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 5b57b5ba2dbae..89557445cafb4 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -24,7 +24,7 @@ class TestSeriesFlexArithmetic: ], ) @pytest.mark.parametrize( - "opname", ["add", "sub", "mul", "floordiv", "truediv", "div", "pow"] + "opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"] ) def test_flex_method_equivalence(self, opname, ts): # check that Series.{opname} behaves like Series.__{opname}__, @@ -34,15 +34,8 @@ def test_flex_method_equivalence(self, opname, ts): other = ts[1](tser) check_reverse = ts[2] - if opname == "div": - pytest.skip("div test only for Py3") - op = getattr(Series, opname) - - if op == "div": - alt = operator.truediv - else: - alt = getattr(operator, opname) + alt = getattr(operator, opname) result = op(series, other) expected = alt(series, other)
Broken off from other branches.
https://api.github.com/repos/pandas-dev/pandas/pulls/27301
2019-07-09T00:54:42Z
2019-07-09T20:46:05Z
2019-07-09T20:46:05Z
2019-07-10T00:04:40Z
ENH: maybe_convert_objects seen NaT speed-up
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 436093ef195ef..7d97f2c740acb 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -2,6 +2,8 @@ import numpy as np +from pandas._libs import lib + import pandas as pd from pandas.util import testing as tm @@ -13,6 +15,19 @@ pass +class MaybeConvertObjects: + def setup(self): + N = 10 ** 5 + + data = list(range(N)) + data[0] = pd.NaT + data = np.array(data) + self.data = data + + def time_maybe_convert_objects(self): + lib.maybe_convert_objects(self.data) + + class Factorize: params = [[True, False], ["int", "uint", "float", "string"]] diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 68ecb4c487a1e..8c472cb3121d2 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -940,7 +940,6 @@ Performance improvements - For :meth:`to_datetime` changed default value of cache parameter to ``True`` (:issue:`26043`) - Improved performance of :class:`DatetimeIndex` and :class:`PeriodIndex` slicing given non-unique, monotonic data (:issue:`27136`). - .. _whatsnew_0250.bug_fixes: Bug fixes diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1df220029def6..1936404b75602 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1955,6 +1955,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, seen.timedelta_ = 1 if not (convert_datetime or convert_timedelta): seen.object_ = 1 + break elif util.is_bool_object(val): seen.bool_ = 1 bools[i] = val
- [X] closes #27299 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
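The win comes from the added `break`: once an object forces an `object`-dtype result and no datetime/timedelta conversion is requested, there is no point scanning further. A sketch mirroring the new ASV benchmark (note that `pandas._libs.lib` is internal API):

```
import numpy as np

import pandas as pd
from pandas._libs import lib

data = np.array([pd.NaT] + list(range(10 ** 5)), dtype=object)
# with convert_datetime/convert_timedelta off (the defaults), the NaT in
# slot 0 settles the outcome immediately and the scan stops early
result = lib.maybe_convert_objects(data)
assert result.dtype == object
```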
https://api.github.com/repos/pandas-dev/pandas/pulls/27300
2019-07-08T22:34:58Z
2019-07-09T20:42:59Z
2019-07-09T20:42:59Z
2019-07-09T20:43:12Z
REF: check can_hold_element instead of try/except
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bf6ebf1abe760..be0661c895485 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -7,7 +7,7 @@ import numpy as np -from pandas._libs import lib, tslib, tslibs +from pandas._libs import NaT, lib, tslib, tslibs import pandas._libs.internals as libinternals from pandas._libs.tslibs import Timedelta, conversion, is_null_datetimelike from pandas.util._validators import validate_bool_kwarg @@ -409,33 +409,29 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): else: return self.copy() - # fillna, but if we cannot coerce, then try again as an ObjectBlock - try: - # Note: we only call try_coerce_args to let it raise - self._try_coerce_args(value) - except (TypeError, ValueError): - - # we can't process the value, but nothing to do - if not mask.any(): - return self if inplace else self.copy() - - # operate column-by-column - def f(m, v, i): - block = self.coerce_to_target_dtype(value) - - # slice out our block - if i is not None: - block = block.getitem_block(slice(i, i + 1)) - return block.fillna(value, limit=limit, inplace=inplace, downcast=None) - - return self.split_and_operate(mask, f, inplace) - else: + if self._can_hold_element(value): + # equivalent: self._try_coerce_args(value) would not raise blocks = self.putmask(mask, value, inplace=inplace) blocks = [ b.make_block(values=self._try_coerce_result(b.values)) for b in blocks ] return self._maybe_downcast(blocks, downcast) + # we can't process the value, but nothing to do + if not mask.any(): + return self if inplace else self.copy() + + # operate column-by-column + def f(m, v, i): + block = self.coerce_to_target_dtype(value) + + # slice out our block + if i is not None: + block = block.getitem_block(slice(i, i + 1)) + return block.fillna(value, limit=limit, inplace=inplace, downcast=None) + + return self.split_and_operate(mask, f, inplace) + def split_and_operate(self, mask, f, inplace): """ split the block per-column, and apply the callable f @@ -2275,7 +2271,13 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return tipo == _NS_DTYPE or tipo == np.int64 - return is_integer(element) or isinstance(element, datetime) or isna(element) + if isinstance(element, datetime): + return element.tzinfo is None + if is_integer(element): + return element == tslibs.iNaT + + # TODO: shouldnt we exclude timedelta64("NaT")? See GH#27297 + return isna(element) def _coerce_values(self, values): return values.view("i8") @@ -2627,6 +2629,8 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, (np.timedelta64, np.int64)) + if element is NaT: + return True return is_integer(element) or isinstance( element, (timedelta, np.timedelta64, np.int64) )
`_can_hold_element` is not well-documented, and in local testing it does _not_ match my intuition of "we could do `self.values[:] = element` without raising". But adding two assertions in `fillna` _does_ work in all the tests:

```
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
    # equivalent: self._try_coerce_args(value) would not raise
    # Note: we only call try_coerce_args to let it raise
    self._try_coerce_args(value)
    assert self._can_hold_element(value)
except (TypeError, ValueError):
    assert not self._can_hold_element(value)
```

(at least after patching the `DatetimeBlock` and `TimedeltaBlock` implementations)
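As an observable consequence of the two branches, a quick sketch of the fallback path (expected behavior given the code above, not a test added in this PR):

```
import numpy as np

import pandas as pd

s = pd.Series([1.0, np.nan])
# FloatBlock._can_hold_element("bar") is False, so fillna takes the
# column-by-column fallback and coerces the block to object dtype
out = s.fillna("bar")
print(out.dtype)  # object
```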
https://api.github.com/repos/pandas-dev/pandas/pulls/27298
2019-07-08T21:51:04Z
2019-07-10T16:20:05Z
2019-07-10T16:20:05Z
2019-07-10T16:26:24Z
CLN: checks instead of try/except
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bf6ebf1abe760..2d936fd8290f1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -3376,36 +3376,36 @@ def _putmask_smart(v, m, n): # will work in the current dtype try: nn = n[m] - + except TypeError: + # TypeError: only integer scalar arrays can be converted to a scalar index + pass + else: # make sure that we have a nullable type # if we have nulls if not _isna_compat(v, nn[0]): - raise ValueError - - # we ignore ComplexWarning here - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", np.ComplexWarning) - nn_at = nn.astype(v.dtype) - - # avoid invalid dtype comparisons - # between numbers & strings - - # only compare integers/floats - # don't compare integers to datetimelikes - if not is_numeric_v_string_like(nn, nn_at) and ( - is_float_dtype(nn.dtype) - or is_integer_dtype(nn.dtype) - and is_float_dtype(nn_at.dtype) - or is_integer_dtype(nn_at.dtype) - ): + pass + elif is_numeric_v_string_like(nn, v): + # avoid invalid dtype comparisons + # between numbers & strings + pass + elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)): + # only compare integers/floats + pass + elif not (is_float_dtype(v.dtype) or is_integer_dtype(v.dtype)): + # only compare integers/floats + pass + else: + + # we ignore ComplexWarning here + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", np.ComplexWarning) + nn_at = nn.astype(v.dtype) comp = nn == nn_at if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv - except (ValueError, IndexError, TypeError, OverflowError): - pass n = np.asarray(n)
Trying to whittle down `try` blocks in internals, since many of them are unclear about which failure modes they are guarding against.
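For orientation, `_putmask_smart` is what decides whether a masked assignment can keep the existing dtype; a hedged sketch of the two outcomes I'd expect from the round-trip comparison in the diff:

```
import pandas as pd

s = pd.Series([1, 2, 3])
s[s > 1] = 4.0   # 4.0 round-trips through int64 (nn == nn_at)
print(s.dtype)   # int64

s2 = pd.Series([1, 2, 3])
s2[s2 > 1] = 4.5  # 4.5 does not round-trip, so the block upcasts
print(s2.dtype)   # float64
```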
https://api.github.com/repos/pandas-dev/pandas/pulls/27296
2019-07-08T21:08:15Z
2019-07-09T20:48:31Z
2019-07-09T20:48:31Z
2019-07-10T00:04:59Z
CLN: Remove unused vars in roll_window
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 6203577e450d9..46e4b17b8164c 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1683,7 +1683,7 @@ def roll_window(ndarray[float64_t, ndim=1, cast=True] values, """ cdef: ndarray[float64_t] output, tot_wgt, counts - Py_ssize_t in_i, win_i, win_n, win_k, in_n, in_k + Py_ssize_t in_i, win_i, win_n, in_n float64_t val_in, val_win, c, w in_n = len(values)
https://api.github.com/repos/pandas-dev/pandas/pulls/27294
2019-07-08T19:34:09Z
2019-07-09T20:49:30Z
2019-07-09T20:49:29Z
2019-07-11T15:45:57Z
BUG: Incorrect Message in KeyError with MultiIndex
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 193a0edee5e96..f9173cf10b490 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1040,6 +1040,7 @@ Indexing - Improved exception message when calling :meth:`DataFrame.iloc` with a list of non-numeric objects (:issue:`25753`). - Improved exception message when calling ``.iloc`` or ``.loc`` with a boolean indexer with different length (:issue:`26658`). +- Bug in ``KeyError`` exception message when indexing a :class:`MultiIndex` with a non-existant key not displaying the original key (:issue:`27250`). - Bug in ``.iloc`` and ``.loc`` with a boolean indexer not raising an ``IndexError`` when too few items are passed (:issue:`26658`). - Bug in :meth:`DataFrame.loc` and :meth:`Series.loc` where ``KeyError`` was not raised for a ``MultiIndex`` when the key was less than or equal to the number of levels in the :class:`MultiIndex` (:issue:`14885`). - Bug in which :meth:`DataFrame.append` produced an erroneous warning indicating that a ``KeyError`` will be thrown in the future when the data to be appended contains new columns (:issue:`22252`). diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 71b551adaf3ef..ff0bffacd37ad 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2810,7 +2810,10 @@ def partial_selection(key, indexer=None): if len(key) == self.nlevels and self.is_unique: # Complete key in unique index -> standard get_loc - return (self._engine.get_loc(key), None) + try: + return (self._engine.get_loc(key), None) + except KeyError as e: + raise KeyError(key) from e else: return partial_selection(key) else: diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py index 0c61644eb46ae..145bfe168390e 100644 --- a/pandas/tests/indexing/multiindex/test_getitem.py +++ b/pandas/tests/indexing/multiindex/test_getitem.py @@ -83,9 +83,9 @@ def test_series_getitem_returns_scalar( @pytest.mark.parametrize( "indexer,expected_error,expected_error_msg", [ - (lambda s: s.__getitem__((2000, 3, 4)), KeyError, r"^356$"), - (lambda s: s[(2000, 3, 4)], KeyError, r"^356$"), - (lambda s: s.loc[(2000, 3, 4)], KeyError, r"^356$"), + (lambda s: s.__getitem__((2000, 3, 4)), KeyError, r"^\(2000, 3, 4\)$"), + (lambda s: s[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"), + (lambda s: s.loc[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"), (lambda s: s.loc[(2000, 3, 4, 5)], IndexingError, "Too many indexers"), (lambda s: s.__getitem__(len(s)), IndexError, "index out of bounds"), (lambda s: s[len(s)], IndexError, "index out of bounds"), diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 9188adc7d6e93..11dc57c3bda12 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -389,7 +389,7 @@ def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data): df = multiindex_dataframe_random_data # test setup - check key not in dataframe - with pytest.raises(KeyError, match=r"^11$"): + with pytest.raises(KeyError, match=r"^\('bar', 'three'\)$"): df.loc[("bar", "three"), "B"] # in theory should be inserting in a sorted space????
- [x] closes #27250 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
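A minimal sketch of the fix in action (the three-level ``MultiIndex`` below is illustrative; the expected error message mirrors the updated test expectations):

```python
import pandas as pd
import pytest

# Unique three-level MultiIndex; the key (2000, 3, 4) is not present.
mi = pd.MultiIndex.from_tuples([(2000, 1, 1), (2000, 1, 2)])
s = pd.Series([10, 20], index=mi)

# The complete-key lookup now re-raises with the original tuple,
# instead of leaking the engine's internal value (e.g. ``356``).
with pytest.raises(KeyError, match=r"^\(2000, 3, 4\)$"):
    s.loc[(2000, 3, 4)]
```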
https://api.github.com/repos/pandas-dev/pandas/pulls/27291
2019-07-08T16:10:49Z
2019-07-09T20:57:38Z
2019-07-09T20:57:37Z
2019-07-10T09:39:35Z
DOC: Small whatsnew fixes
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 68ecb4c487a1e..6e3917bde6f16 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -190,7 +190,7 @@ Other enhancements - Added support for ISO week year format ('%G-%V-%u') when parsing datetimes using :meth:`to_datetime` (:issue:`16607`) - Indexing of ``DataFrame`` and ``Series`` now accepts zerodim ``np.ndarray`` (:issue:`24919`) - :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`) -- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :meth:`datetime.time` objects with timezones (:issue:`24043`) +- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :class:`datetime.time` objects with timezones (:issue:`24043`) - :meth:`DataFrame.pivot_table` now accepts an ``observed`` parameter which is passed to underlying calls to :meth:`DataFrame.groupby` to speed up grouping categorical data. (:issue:`24923`) - ``Series.str`` has gained :meth:`Series.str.casefold` method to removes all case distinctions present in a string (:issue:`25405`) - :meth:`DataFrame.set_index` now works for instances of ``abc.Iterator``, provided their output is of the same length as the calling frame (:issue:`22484`, :issue:`24984`) @@ -406,9 +406,8 @@ Previously, columns that were categorical, but not the groupby key(s) would be c .. ipython:: python - df = pd.DataFrame( - {'payload': [-1, -2, -1, -2], - 'col': pd.Categorical(["foo", "bar", "bar", "qux"], ordered=True)}) + cat = pd.Categorical(["foo", "bar", "bar", "qux"], ordered=True) + df = pd.DataFrame({'payload': [-1, -2, -1, -2], 'col': cat}) df df.dtypes @@ -879,7 +878,7 @@ Other deprecations :meth:`SparseArray.get_values` and :meth:`Categorical.get_values` methods are deprecated. One of ``np.asarray(..)`` or :meth:`~Series.to_numpy` can be used instead (:issue:`19617`). - The 'outer' method on NumPy ufuncs, e.g. ``np.subtract.outer`` has been deprecated on :class:`Series` objects. Convert the input to an array with :attr:`Series.array` first (:issue:`27186`) -- :meth:`Timedelta.resolution` is deprecated and replaced with :meth:`Timedelta.resolution_string`. In a future version, :meth:`Timedelta.resolution` will be changed to behave like the standard library :attr:`timedelta.resolution` (:issue:`21344`) +- :meth:`Timedelta.resolution` is deprecated and replaced with :meth:`Timedelta.resolution_string`. In a future version, :meth:`Timedelta.resolution` will be changed to behave like the standard library :attr:`datetime.timedelta.resolution` (:issue:`21344`) - :func:`read_table` has been undeprecated. (:issue:`25220`) - :attr:`Index.dtype_str` is deprecated. (:issue:`18262`) - :attr:`Series.imag` and :attr:`Series.real` are deprecated. 
(:issue:`18262`) @@ -902,7 +901,7 @@ Removal of prior version deprecations/changes - Removed the previously deprecated ``convert_objects`` (:issue:`11221`) - Removed the previously deprecated ``select`` method of ``DataFrame`` and ``Series`` (:issue:`17633`) - Removed the previously deprecated behavior of :class:`Series` treated as list-like in :meth:`~Series.cat.rename_categories` (:issue:`17982`) -- Removed the previously deprecated ``DataFrame.reindex_axis`` and ``Series.reindex_axis``` (:issue:`17842`) +- Removed the previously deprecated ``DataFrame.reindex_axis`` and ``Series.reindex_axis`` (:issue:`17842`) - Removed the previously deprecated behavior of altering column or index labels with :meth:`Series.rename_axis` or :meth:`DataFrame.rename_axis` (:issue:`17842`) - Removed the previously deprecated ``tupleize_cols`` keyword argument in :meth:`read_html`, :meth:`read_csv`, and :meth:`DataFrame.to_csv` (:issue:`17877`, :issue:`17820`) - Removed the previously deprecated ``DataFrame.from.csv`` and ``Series.from_csv`` (:issue:`17812`) @@ -910,7 +909,7 @@ Removal of prior version deprecations/changes - Removed the previously deprecated ``ordered`` and ``categories`` keyword arguments in ``astype`` (:issue:`17742`) - Removed the previously deprecated ``cdate_range`` (:issue:`17691`) - Removed the previously deprecated ``True`` option for the ``dropna`` keyword argument in :func:`SeriesGroupBy.nth` (:issue:`17493`) -- Removed the previously deprecated ``convert`` keyword argument in :meth:`Series.take` and :meth:`DataFrame.take`(:issue:`17352`) +- Removed the previously deprecated ``convert`` keyword argument in :meth:`Series.take` and :meth:`DataFrame.take` (:issue:`17352`) .. _whatsnew_0250.performance: @@ -1133,7 +1132,7 @@ Groupby/resample/rolling - Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where MemoryError is raised with empty window (:issue:`26005`) - Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where incorrect results are returned with ``closed='left'`` and ``closed='neither'`` (:issue:`26005`) - Improved :class:`pandas.core.window.Rolling`, :class:`pandas.core.window.Window` and :class:`pandas.core.window.EWM` functions to exclude nuisance columns from results instead of raising errors and raise a ``DataError`` only if all columns are nuisance (:issue:`12537`) -- Bug in :meth:`pandas.core.window.Rolling.max` and :meth:`pandas.core.window.Rolling.min` where incorrect results are returned with an empty variable window`` (:issue:`26005`) +- Bug in :meth:`pandas.core.window.Rolling.max` and :meth:`pandas.core.window.Rolling.min` where incorrect results are returned with an empty variable window (:issue:`26005`) Reshaping ^^^^^^^^^
A couple small things I saw reading over the whatsnew notes.
https://api.github.com/repos/pandas-dev/pandas/pulls/27289
2019-07-08T15:16:23Z
2019-07-10T04:29:52Z
2019-07-10T04:29:52Z
2019-07-10T04:29:56Z
Add comment and test for geopandas compat fix (GH27259)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c30885291ffc9..2566f05daeaea 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -123,6 +123,9 @@ def __getitem__(self, key): # generally slice or list. # TODO(ix): most/all of the TypeError cases here are for ix, # so this check can be removed once ix is removed. + # The InvalidIndexError is only caught for compatibility + # with geopandas, see + # https://github.com/pandas-dev/pandas/issues/27258 pass else: if is_scalar(values): diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index d644c002fbdfb..93baafddedeb4 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -10,7 +10,7 @@ from pandas.compat import PY36 -from pandas import DataFrame +from pandas import DataFrame, Series from pandas.util import testing as tm @@ -123,6 +123,26 @@ def test_geopandas(): assert geopandas.read_file(fp) is not None +def test_geopandas_coordinate_indexer(): + # this test is included to have coverage of one case in the indexing.py + # code that is only kept for compatibility with geopandas, see + # https://github.com/pandas-dev/pandas/issues/27258 + # We should be able to remove this after some time when its usage is + # removed in geopandas + from pandas.core.indexing import _NDFrameIndexer + + class _CoordinateIndexer(_NDFrameIndexer): + def _getitem_tuple(self, tup): + obj = self.obj + xs, ys = tup + return obj[xs][ys] + + Series._create_indexer("cx", _CoordinateIndexer) + s = Series(range(5)) + res = s.cx[:, :] + tm.assert_series_equal(s, res) + + # Cython import warning @pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") def test_pyarrow(df):
xref #27259 cc @jbrockmendel
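For reference, the downstream pattern this keeps working, condensed from the test added above (the ``cx`` accessor name comes from geopandas' usage; everything else is as in the test):

```python
import pandas as pd
from pandas.core.indexing import _NDFrameIndexer

class _CoordinateIndexer(_NDFrameIndexer):
    # geopandas-style indexer: unpack an (xs, ys) tuple of slices
    def _getitem_tuple(self, tup):
        xs, ys = tup
        return self.obj[xs][ys]

pd.Series._create_indexer("cx", _CoordinateIndexer)

s = pd.Series(range(5))
assert s.cx[:, :].equals(s)  # a full slice on both "axes" round-trips
```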
https://api.github.com/repos/pandas-dev/pandas/pulls/27287
2019-07-08T12:44:49Z
2019-07-08T13:31:04Z
2019-07-08T13:31:03Z
2019-07-08T13:31:04Z
BUG: Fix CategoricalIndex.__contains__ with non-hashable, closes #21729
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 68ecb4c487a1e..cbb6fda1375bd 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1050,6 +1050,7 @@ Indexing - Bug which produced ``AttributeError`` on partial matching :class:`Timestamp` in a :class:`MultiIndex` (:issue:`26944`) - Bug in :class:`Categorical` and :class:`CategoricalIndex` with :class:`Interval` values when using the ``in`` operator (``__contains``) with objects that are not comparable to the values in the ``Interval`` (:issue:`23705`) - Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` on a :class:`DataFrame` with a single timezone-aware datetime64[ns] column incorrectly returning a scalar instead of a :class:`Series` (:issue:`27110`) +- Bug in :class:`CategoricalIndex` and :class:`Categorical` incorrectly raising ``ValueError`` instead of ``TypeError`` when a list is passed using the ``in`` operator (``__contains__``) (:issue:`21729`) - Missing diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index c4f7d6dbe32fa..df5cd12a479f0 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2020,7 +2020,7 @@ def __contains__(self, key): Returns True if `key` is in this Categorical. """ # if key is a NaN, check if any NaN is in self. - if isna(key): + if is_scalar(key) and isna(key): return self.isna().any() return contains(self, key, container=self._codes) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a1989fd62b6ee..ce1b99b315936 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -60,6 +60,7 @@ is_extension_array_dtype, is_extension_type, is_float_dtype, + is_hashable, is_integer, is_integer_dtype, is_iterator, @@ -2954,16 +2955,12 @@ def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) - # shortcut if the key is in columns - try: + if is_hashable(key): + # shortcut if the key is in columns if self.columns.is_unique and key in self.columns: if self.columns.nlevels > 1: return self._getitem_multilevel(key) return self._get_item_cache(key) - except (TypeError, ValueError): - # The TypeError correctly catches non hashable "key" (e.g. list) - # The ValueError can be removed once GH #21729 is fixed - pass # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 9550d68f1d32b..8f605e487ecf4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -407,7 +407,7 @@ def _reverse_indexer(self): @Appender(_index_shared_docs["contains"] % _index_doc_kwargs) def __contains__(self, key): # if key is a NaN, check if any NaN is in self. 
- if isna(key): + if is_scalar(key) and isna(key): return self.hasnans return contains(self, key, container=self._engine) diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index 697ee483db6d9..9a09ea8422b1f 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -417,3 +417,15 @@ def test_contains_interval(self, item, expected): cat = Categorical(pd.IntervalIndex.from_breaks(range(3))) result = item in cat assert result is expected + + def test_contains_list(self): + # GH#21729 + cat = Categorical([1, 2, 3]) + + assert "a" not in cat + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in cat + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in cat diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index d52bc818c95aa..2b9632acd83ca 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -276,6 +276,18 @@ def test_contains_interval(self, item, expected): result = item in ci assert result is expected + def test_contains_list(self): + # GH#21729 + idx = pd.CategoricalIndex([1, 2, 3]) + + assert "a" not in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in idx + def test_map(self): ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True) result = ci.map(lambda x: x.lower())
- [x] closes #21729 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
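In short, the behavior the new tests pin down (a sketch using the same values as the added tests):

```python
import pandas as pd
import pytest

idx = pd.CategoricalIndex([1, 2, 3])

# A missing hashable scalar is simply absent.
assert "a" not in idx

# An unhashable key now raises TypeError (previously a ValueError
# could leak out of the isna(key) check).
with pytest.raises(TypeError, match="unhashable type"):
    ["a"] in idx
```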
https://api.github.com/repos/pandas-dev/pandas/pulls/27284
2019-07-08T04:22:33Z
2019-07-09T21:13:28Z
2019-07-09T21:13:28Z
2019-07-10T00:05:27Z
TST/STYLE: concatenate string literals post black reformatting
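The cleanup pattern in miniature (illustrative message; Python joins adjacent string literals at compile time, so black's earlier line-splitting left redundant fragments behind):

```python
# Before: black reflowed the call, leaving two adjacent literal fragments
msg = "Timestamp subtraction must have the same timezones or no" " timezones"

# After: the equivalent single literal
msg = "Timestamp subtraction must have the same timezones or no timezones"

# Adjacent string literals concatenate at compile time:
assert "no" " timezones" == "no timezones"
```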
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index f582bf8b13975..8179ab08895da 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -469,7 +469,7 @@ class TestMultiplicationDivision: pytest.param( pd.Index, marks=pytest.mark.xfail( - reason="Index.__div__ always " "raises", raises=TypeError + reason="Index.__div__ always raises", raises=TypeError ), ), pd.Series, diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index bd21335a7f9c7..e54c16c7a27a4 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -960,7 +960,7 @@ def test_add_iadd_timedeltalike_annual(self): def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq): other = mismatched_freq rng = pd.period_range("2014", "2024", freq="A") - msg = "Input has different freq(=.+)? " "from Period.*?\\(freq=A-DEC\\)" + msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)" with pytest.raises(IncompatibleFrequency, match=msg): rng + other with pytest.raises(IncompatibleFrequency, match=msg): diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 06c4a6ece4bcc..326c565308124 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -318,19 +318,19 @@ def _check(result, expected): _check(result, expected) # tz mismatches - msg = "Timestamp subtraction must have the same timezones or no" " timezones" + msg = "Timestamp subtraction must have the same timezones or no timezones" with pytest.raises(TypeError, match=msg): dt_tz - ts msg = "can't subtract offset-naive and offset-aware datetimes" with pytest.raises(TypeError, match=msg): dt_tz - dt - msg = "Timestamp subtraction must have the same timezones or no" " timezones" + msg = "Timestamp subtraction must have the same timezones or no timezones" with pytest.raises(TypeError, match=msg): dt_tz - ts_tz2 msg = "can't subtract offset-naive and offset-aware datetimes" with pytest.raises(TypeError, match=msg): dt - dt_tz - msg = "Timestamp subtraction must have the same timezones or no" " timezones" + msg = "Timestamp subtraction must have the same timezones or no timezones" with pytest.raises(TypeError, match=msg): ts - dt_tz with pytest.raises(TypeError, match=msg): @@ -1771,7 +1771,7 @@ def test_td64arr_floordiv_int(self, box_with_array): result = idx // 1 tm.assert_equal(result, idx) - pattern = "floor_divide cannot use operands|" "Cannot divide int by Timedelta*" + pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*" with pytest.raises(TypeError, match=pattern): 1 // idx diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index cd8ec7fcb787d..697ee483db6d9 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -313,7 +313,7 @@ def test_unordered_different_categories_raises(self): c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False) c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False) - with pytest.raises(TypeError, match=("Categoricals can " "only be compared")): + with pytest.raises(TypeError, match=("Categoricals can only be compared")): c1 == c2 def test_compare_different_lengths(self): diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index 
c01b52456ff87..dfdb08fa78cbc 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -88,7 +88,7 @@ def test_repr_dtype(dtype, expected): def test_repr_array(): result = repr(integer_array([1, None, 3])) - expected = "<IntegerArray>\n" "[1, NaN, 3]\n" "Length: 3, dtype: Int64" + expected = "<IntegerArray>\n[1, NaN, 3]\nLength: 3, dtype: Int64" assert result == expected diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index fab59d312fb9d..252f278242fcc 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -259,7 +259,7 @@ def test_repr_small(): arr = period_array(["2000", "2001"], freq="D") result = str(arr) expected = ( - "<PeriodArray>\n" "['2000-01-01', '2001-01-01']\n" "Length: 2, dtype: period[D]" + "<PeriodArray>\n['2000-01-01', '2001-01-01']\nLength: 2, dtype: period[D]" ) assert result == expected diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 2fd7c8f04c8be..37a885e33847f 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -92,7 +92,7 @@ def _eval_single_bin(lhs, cmp1, rhs, engine): return c(lhs, rhs) except ValueError as e: if str(e).startswith( - "negative number cannot be " "raised to a fractional power" + "negative number cannot be raised to a fractional power" ): return np.nan raise @@ -362,7 +362,7 @@ def get_expected_pow_result(self, lhs, rhs): expected = _eval_single_bin(lhs, "**", rhs, self.engine) except ValueError as e: if str(e).startswith( - "negative number cannot be " "raised to a fractional power" + "negative number cannot be raised to a fractional power" ): if self.engine == "python": pytest.skip(str(e)) @@ -1944,7 +1944,7 @@ def test_empty_string_raises(engine, parser): def test_more_than_one_expression_raises(engine, parser): - with pytest.raises(SyntaxError, match=("only a single expression " "is allowed")): + with pytest.raises(SyntaxError, match=("only a single expression is allowed")): pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser) diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/bool.py index ee043a6bb837c..eb75d6d968073 100644 --- a/pandas/tests/extension/arrow/bool.py +++ b/pandas/tests/extension/arrow/bool.py @@ -33,7 +33,7 @@ def construct_from_string(cls, string): if string == cls.name: return cls() else: - raise TypeError("Cannot construct a '{}' from " "'{}'".format(cls, string)) + raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string)) @classmethod def construct_array_type(cls): diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 90e6a91fbb91a..c28ff956a33a4 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -40,7 +40,7 @@ def construct_from_string(cls, string): if string == cls.name: return cls() else: - raise TypeError("Cannot construct a '{}' from " "'{}'".format(cls, string)) + raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string)) @property def _is_numeric(self): @@ -172,7 +172,7 @@ def _reduce(self, name, skipna=True, **kwargs): op = getattr(self.data, name) except AttributeError: raise NotImplementedError( - "decimal does not support " "the {} operation".format(name) + "decimal does not support the {} operation".format(name) ) return op(axis=0) diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 
272936f6ec9f0..9dec023f4073a 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -207,7 +207,7 @@ def test_series_repr(self, data): # TODO(extension) @pytest.mark.xfail( reason=( - "raising AssertionError as this is not implemented, " "though easy enough to do" + "raising AssertionError as this is not implemented, though easy enough to do" ) ) def test_series_constructor_coerce_data_to_extension_dtype_raises(): diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index ece1924b1b228..21c4ac8f055a2 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -44,7 +44,7 @@ def construct_from_string(cls, string): if string == cls.name: return cls() else: - raise TypeError("Cannot construct a '{}' from " "'{}'".format(cls, string)) + raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string)) class JSONArray(ExtensionArray): diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 2ce65bd15387e..912e8b5fba233 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -1304,7 +1304,7 @@ def test_rename_axis_style_raises(self): df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"]) # Named target and axis - over_spec_msg = "Cannot specify both 'axis' and " "any of 'index' or 'columns'" + over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'" with pytest.raises(TypeError, match=over_spec_msg): df.rename(index=str.lower, axis=1) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 13ffa8d17d47c..d5c66f0c1dd64 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -363,9 +363,7 @@ def test_corr_cov_independent_index_column(self): def test_corr_invalid_method(self): # GH 22298 df = pd.DataFrame(np.random.normal(size=(10, 2))) - msg = ( - "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " - ) + msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, " with pytest.raises(ValueError, match=msg): df.corr(method="____") @@ -1441,7 +1439,7 @@ def test_mean_datetimelike(self): tm.assert_series_equal(result, expected) @pytest.mark.xfail( - reason="casts to object-dtype and then tries to " "add timestamps", + reason="casts to object-dtype and then tries to add timestamps", raises=TypeError, strict=True, ) @@ -1643,7 +1641,7 @@ def test_idxmin(self, float_frame, int_frame): expected = df.apply(Series.idxmin, axis=axis, skipna=skipna) tm.assert_series_equal(result, expected) - msg = "No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>" + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): frame.idxmin(axis=2) @@ -1658,7 +1656,7 @@ def test_idxmax(self, float_frame, int_frame): expected = df.apply(Series.idxmax, axis=axis, skipna=skipna) tm.assert_series_equal(result, expected) - msg = "No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>" + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): frame.idxmax(axis=2) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 93508d7ddc50b..76a210e129eb3 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -149,7 +149,7 @@ def test_not_hashable(self): 
empty_frame = DataFrame() df = self.klass([1]) - msg = "'(Sparse)?DataFrame' objects are mutable, thus they cannot be" " hashed" + msg = "'(Sparse)?DataFrame' objects are mutable, thus they cannot be hashed" with pytest.raises(TypeError, match=msg): hash(df) with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 37b0d61ee31d9..1b6ee91317996 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -286,7 +286,7 @@ def f(dtype): data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)) return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype) - msg = "compound dtypes are not implemented in the DataFrame" " constructor" + msg = "compound dtypes are not implemented in the DataFrame constructor" with pytest.raises(NotImplementedError, match=msg): f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index a16ca7045cfdd..349e2d9c578be 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -474,7 +474,7 @@ def test_constructor_error_msgs(self): with pytest.raises(ValueError, match=msg): DataFrame((range(10), range(10, 20)), columns=("ones", "twos")) - msg = "If using all scalar " "values, you must pass " "an index" + msg = "If using all scalar values, you must pass an index" with pytest.raises(ValueError, match=msg): DataFrame({"a": False, "b": True}) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index ba6a9d2aa6ee9..00be13b1c0e72 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -686,9 +686,7 @@ def test_astype_dict_like(self, dtype_class): # in the keys of the dtype dict dt4 = dtype_class({"b": str, 2: str}) dt5 = dtype_class({"e": str}) - msg = ( - "Only a column name can be used for the key in a dtype mappings" " argument" - ) + msg = "Only a column name can be used for the key in a dtype mappings argument" with pytest.raises(KeyError, match=msg): df.astype(dt4) with pytest.raises(KeyError, match=msg): @@ -1194,11 +1192,11 @@ def test_astype_str(self, timezone_frame): with option_context("display.max_columns", 20): result = str(timezone_frame) assert ( - "0 2013-01-01 2013-01-01 00:00:00-05:00 " "2013-01-01 00:00:00+01:00" + "0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00" ) in result assert ( - "1 2013-01-02 " "NaT NaT" + "1 2013-01-02 NaT NaT" ) in result assert ( - "2 2013-01-03 2013-01-03 00:00:00-05:00 " "2013-01-03 00:00:00+01:00" + "2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00" ) in result diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index c63a5ba64495f..94667ecfa837d 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -126,7 +126,7 @@ def test_dropna(self): assert_frame_equal(dropped, expected) # bad input - msg = "No axis named 3 for object type" " <class 'pandas.core.frame.DataFrame'>" + msg = "No axis named 3 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): df.dropna(axis=3) @@ -362,7 +362,7 @@ def test_na_actions_categorical(self): res = df.fillna(value={"cats": 3, "vals": "b"}) tm.assert_frame_equal(res, df_exp_fill) - with pytest.raises(ValueError, match=("fill value must " "be in categories")): + with pytest.raises(ValueError, 
match=("fill value must be in categories")): df.fillna(value={"cats": 4, "vals": "c"}) res = df.fillna(method="pad") diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py index bbb3395fb23af..236cadf67735d 100644 --- a/pandas/tests/frame/test_quantile.py +++ b/pandas/tests/frame/test_quantile.py @@ -90,9 +90,7 @@ def test_quantile_axis_parameter(self): result = df.quantile(0.5, axis="columns") assert_series_equal(result, expected) - msg = ( - "No axis named -1 for object type" " <class 'pandas.core.frame.DataFrame'>" - ) + msg = "No axis named -1 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): df.quantile(0.1, axis=-1) msg = ( diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index b6442d8938843..24833f8c02df0 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -55,7 +55,7 @@ def test_sort_values(self): sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False]) assert_frame_equal(sorted_df, expected) - msg = "No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>" + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): frame.sort_values(by=["A", "B"], axis=2, inplace=True) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 92801b02dee22..1ca8333154c13 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -901,7 +901,7 @@ def test_frame_to_period(self): pts = df.to_period("M", axis=1) tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) - msg = "No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>" + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): df.to_period(axis=2) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index aef6c3fe8070c..b2b38980d0ceb 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -724,10 +724,10 @@ def test_squeeze(self): tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0]) assert df.squeeze() == df.iloc[0, 0] - msg = "No axis named 2 for object type <class" " 'pandas.core.frame.DataFrame'>" + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): df.squeeze(axis=2) - msg = "No axis named x for object type <class" " 'pandas.core.frame.DataFrame'>" + msg = "No axis named x for object type <class 'pandas.core.frame.DataFrame'>" with pytest.raises(ValueError, match=msg): df.squeeze(axis="x") diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 68e3db3a1ccb0..efc3142b25b82 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -92,7 +92,7 @@ def test_builtins_apply(keys, f): result = df.groupby(keys).apply(f) ngroups = len(df.drop_duplicates(subset=keys)) - assert_msg = "invalid frame shape: {} " "(expected ({}, 3))".format( + assert_msg = "invalid frame shape: {} (expected ({}, 3))".format( result.shape, ngroups ) assert result.shape == (ngroups, 3), assert_msg @@ -1220,7 +1220,7 @@ def test_size_groupby_all_null(): def test_quantile(interpolation, a_vals, b_vals, q): if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 
1]: pytest.skip( - "Unclear numpy expectation for nearest result with " "equidistant data" + "Unclear numpy expectation for nearest result with equidistant data" ) a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation) @@ -1243,9 +1243,7 @@ def test_quantile_raises(): [["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"] ) - with pytest.raises( - TypeError, match="cannot be performed against " "'object' dtypes" - ): + with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"): df.groupby("key").quantile() diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 1fd67caadf2e4..72099f2fa3f11 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -378,7 +378,7 @@ def test_groupby_grouper_f_sanity_checked(self): ts.groupby(lambda key: key[0:6]) def test_grouping_error_on_multidim_input(self, df): - msg = "Grouper for '<class 'pandas.core.frame.DataFrame'>'" " not 1-dimensional" + msg = "Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional" with pytest.raises(ValueError, match=msg): Grouping(df.index, df[["A", "A"]]) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 705e4080cf34e..1eab3ba253f4d 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -768,7 +768,7 @@ def test_transform_with_non_scalar_group(): @pytest.mark.parametrize("agg_func", ["count", "rank", "size"]) def test_transform_numeric_ret(cols, exp, comp_func, agg_func): if agg_func == "size" and isinstance(cols, list): - pytest.xfail("'size' transformation not supported with " "NDFrameGroupy") + pytest.xfail("'size' transformation not supported with NDFrameGroupy") # GH 19200 df = pd.DataFrame( diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index 03e10ff44c299..ee380c6108c38 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -232,9 +232,7 @@ def test_groupby_blacklist(df_letters): blacklist.extend(to_methods) # e.g., to_csv - defined_but_not_allowed = ( - "(?:^Cannot.+{0!r}.+{1!r}.+try using the " "'apply' method$)" - ) + defined_but_not_allowed = "(?:^Cannot.+{0!r}.+{1!r}.+try using the 'apply' method$)" # e.g., query, eval not_defined = "(?:^{1!r} object has no attribute {0!r}$)" diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index f22c820253ee5..6708feda7dd1e 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -663,7 +663,7 @@ def test_constructor_dtype(self): DatetimeIndex(idx, dtype="datetime64[ns]") # this is effectively trying to convert tz's - msg = "data is already tz-aware US/Eastern, unable to set specified" " tz: CET" + msg = "data is already tz-aware US/Eastern, unable to set specified tz: CET" with pytest.raises(TypeError, match=msg): DatetimeIndex(idx, dtype="datetime64[ns, CET]") msg = "cannot supply both a tz and a dtype with a tz" diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index f0aae748092e3..33a744cc25ca1 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -83,8 +83,8 @@ def test_dti_representation(self, method): ) exp = [] - exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""") - 
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', " "freq='D')") + exp.append("DatetimeIndex([], dtype='datetime64[ns]', freq='D')") + exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')") exp.append( "DatetimeIndex(['2011-01-01', '2011-01-02'], " "dtype='datetime64[ns]', freq='D')" @@ -132,9 +132,9 @@ def test_dti_representation_to_series(self): exp1 = """Series([], dtype: datetime64[ns])""" - exp2 = "0 2011-01-01\n" "dtype: datetime64[ns]" + exp2 = "0 2011-01-01\ndtype: datetime64[ns]" - exp3 = "0 2011-01-01\n" "1 2011-01-02\n" "dtype: datetime64[ns]" + exp3 = "0 2011-01-01\n1 2011-01-02\ndtype: datetime64[ns]" exp4 = ( "0 2011-01-01\n" @@ -186,13 +186,13 @@ def test_dti_summary(self): ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" ) - exp1 = "DatetimeIndex: 0 entries\n" "Freq: D" + exp1 = "DatetimeIndex: 0 entries\nFreq: D" - exp2 = "DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n" "Freq: D" + exp2 = "DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\nFreq: D" - exp3 = "DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n" "Freq: D" + exp3 = "DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\nFreq: D" - exp4 = "DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n" "Freq: D" + exp4 = "DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\nFreq: D" exp5 = ( "DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 " diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index ec4310dbc8396..10d422e8aa52c 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -352,7 +352,7 @@ def test_to_datetime_iso_week_year_format(self, s, _format, dt): [ "ISO year directive '%G' must be used with the ISO week directive " "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", - "1999 " "Monday", + "1999 Monday", "%G %A", ], [ diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index dfe3a97ec9b90..962ed2b1cf8ed 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -389,7 +389,7 @@ def test_frame_repr(self): {"A": [1, 2, 3, 4]}, index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4]) ) result = repr(df) - expected = " A\n" "(0, 1] 1\n" "(1, 2] 2\n" "(2, 3] 3\n" "(3, 4] 4" + expected = " A\n(0, 1] 1\n(1, 2] 2\n(2, 3] 3\n(3, 4] 4" assert result == expected @pytest.mark.parametrize( @@ -406,7 +406,7 @@ def test_frame_repr(self): ), ( pd.DataFrame, - (" 0\n" "(0.0, 1.0] a\n" "NaN b\n" "(2.0, 3.0] c"), + (" 0\n(0.0, 1.0] a\nNaN b\n(2.0, 3.0] c"), ), ], ) diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index 7f5d57db8da88..36152bc4b60cd 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -212,9 +212,7 @@ def test_take_fill_value(): expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) tm.assert_index_equal(result, expected) - msg = ( - "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" - ) + msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1" with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -2]), fill_value=True) with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py index 1b6177ede30ec..86c9ee3455d0b 100644 --- 
a/pandas/tests/indexes/multi/test_constructor.py +++ b/pandas/tests/indexes/multi/test_constructor.py @@ -454,7 +454,7 @@ def test_from_product_empty_three_levels(N): "invalid_input", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]] ) def test_from_product_invalid_input(invalid_input): - msg = r"Input must be a list / sequence of iterables|" "Input must be list-like" + msg = r"Input must be a list / sequence of iterables|Input must be list-like" with pytest.raises(TypeError, match=msg): MultiIndex.from_product(iterables=invalid_input) diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py index 21b71613f00f0..64d2859cd13db 100644 --- a/pandas/tests/indexes/multi/test_contains.py +++ b/pandas/tests/indexes/multi/test_contains.py @@ -81,7 +81,7 @@ def test_isin_level_kwarg(): msg = "Too many levels: Index has only 2 levels, not 6" with pytest.raises(IndexError, match=msg): idx.isin(vals_0, level=5) - msg = "Too many levels: Index has only 2 levels, -5 is not a valid level" " number" + msg = "Too many levels: Index has only 2 levels, -5 is not a valid level number" with pytest.raises(IndexError, match=msg): idx.isin(vals_0, level=-5) diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 75dea68eadbf7..d366dbd8bc0a8 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -187,9 +187,7 @@ def test_get_indexer(): def test_get_indexer_nearest(): midx = MultiIndex.from_tuples([("a", 1), ("b", 2)]) - msg = ( - "method='nearest' not implemented yet for MultiIndex; see GitHub" " issue 9365" - ) + msg = "method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365" with pytest.raises(NotImplementedError, match=msg): midx.get_indexer(["a"], method="nearest") msg = "tolerance not implemented yet for MultiIndex" @@ -275,7 +273,7 @@ def test_get_loc(idx): with pytest.raises(KeyError, match=r"^'quux'$"): idx.get_loc("quux") - msg = "only the default get_loc method is currently supported for" " MultiIndex" + msg = "only the default get_loc method is currently supported for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.get_loc("foo", method="nearest") diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 1057ca7bbd662..80e4b1fe1e430 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -72,19 +72,19 @@ def test_shift_corner_cases(self): tm.assert_index_equal(idx.shift(3), idx) idx = pd.PeriodIndex( - ["2011-01-01 10:00", "2011-01-01 11:00" "2011-01-01 12:00"], + ["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"], name="xxx", freq="H", ) tm.assert_index_equal(idx.shift(0), idx) exp = pd.PeriodIndex( - ["2011-01-01 13:00", "2011-01-01 14:00" "2011-01-01 15:00"], + ["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"], name="xxx", freq="H", ) tm.assert_index_equal(idx.shift(3), exp) exp = pd.PeriodIndex( - ["2011-01-01 07:00", "2011-01-01 08:00" "2011-01-01 09:00"], + ["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"], name="xxx", freq="H", ) diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index 7c10239faad42..eab55b91b3e60 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -394,15 +394,15 @@ def 
test_constructor_freq_mult(self, func, warning): ) tm.assert_index_equal(pidx, expected) - msg = "Frequency must be positive, because it" " represents span: -1M" + msg = "Frequency must be positive, because it represents span: -1M" with pytest.raises(ValueError, match=msg): PeriodIndex(["2011-01"], freq="-1M") - msg = "Frequency must be positive, because it" " represents span: 0M" + msg = "Frequency must be positive, because it represents span: 0M" with pytest.raises(ValueError, match=msg): PeriodIndex(["2011-01"], freq="0M") - msg = "Frequency must be positive, because it" " represents span: 0M" + msg = "Frequency must be positive, because it represents span: 0M" with pytest.raises(ValueError, match=msg): period_range("2011-01", periods=3, freq="0M") diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index c5566f74af11e..2a88b79f381c4 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -48,7 +48,7 @@ class TestPeriodIndexRendering: def test_frame_repr(self): df = pd.DataFrame({"A": [1, 2, 3]}, index=pd.date_range("2000", periods=3)) result = repr(df) - expected = " A\n" "2000-01-01 1\n" "2000-01-02 2\n" "2000-01-03 3" + expected = " A\n2000-01-01 1\n2000-01-02 2\n2000-01-03 3" assert result == expected @pytest.mark.parametrize("method", ["__repr__", "__str__"]) @@ -65,13 +65,11 @@ def test_representation(self, method): idx9 = pd.period_range("2013Q1", periods=3, freq="Q") idx10 = PeriodIndex(["2011-01-01", "2011-02-01"], freq="3D") - exp1 = """PeriodIndex([], dtype='period[D]', freq='D')""" + exp1 = "PeriodIndex([], dtype='period[D]', freq='D')" - exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')""" + exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')" - exp3 = ( - "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', " "freq='D')" - ) + exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', freq='D')" exp4 = ( "PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " @@ -88,11 +86,9 @@ def test_representation(self, method): "dtype='period[H]', freq='H')" ) - exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', " "freq='Q-DEC')" + exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', freq='Q-DEC')" - exp8 = ( - "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', " "freq='Q-DEC')" - ) + exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', freq='Q-DEC')" exp9 = ( "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], " diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index 94b061330002f..03e4bd5834166 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -91,7 +91,7 @@ def test_union(self, sort): ["2000-01-01 09:01", "2000-01-01 09:03", "2000-01-01 09:05"], freq="T" ) other5 = pd.PeriodIndex( - ["2000-01-01 09:01", "2000-01-01 09:05" "2000-01-01 09:08"], freq="T" + ["2000-01-01 09:01", "2000-01-01 09:05", "2000-01-01 09:08"], freq="T" ) expected5 = pd.PeriodIndex( [ diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index e52954a114578..1db2c5c3a8dac 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -270,7 +270,7 @@ def test_to_timestamp_pi_nat(self): tm.assert_index_equal(result3, exp) assert result3.freqstr == "3M" - msg = "Frequency must be positive, because it" " represents span: -2A" + msg = "Frequency must 
be positive, because it represents span: -2A" with pytest.raises(ValueError, match=msg): result.to_period(freq="-2A") diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f0382a040e063..e75d80bec1fdf 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -564,7 +564,7 @@ def test_constructor_overflow_int64(self): with pytest.raises(OverflowError, match=msg): Index([np.iinfo(np.uint64).max - 1], dtype="int64") - @pytest.mark.xfail(reason="see GH#21311: Index " "doesn't enforce dtype argument") + @pytest.mark.xfail(reason="see GH#21311: Index doesn't enforce dtype argument") def test_constructor_cast(self): msg = "could not convert string to float" with pytest.raises(ValueError, match=msg): @@ -728,9 +728,7 @@ def test_nanosecond_index_access(self): # this does not yet work, as parsing strings is done via dateutil # assert first_value == x['2013-01-01 00:00:00.000000050+0000'] - expected_ts = np_datetime64_compat( - "2013-01-01 00:00:00.000000050+" "0000", "ns" - ) + expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns") assert first_value == x[Timestamp(expected_ts)] def test_booleanindex(self): @@ -2361,7 +2359,7 @@ def test_string_index_repr(self, index, expected): # short ( pd.Index(["あ", "いい", "ううう"]), - ("Index(['あ', 'いい', 'ううう'], " "dtype='object')"), + ("Index(['あ', 'いい', 'ううう'], dtype='object')"), ), # multiple lines ( diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 465b7f5e23bb8..b9bdaf40f8589 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -348,7 +348,7 @@ def test_has_duplicates(self, indices): # MultiIndex tested separately in: # tests/indexes/multi/test_unique_and_duplicates. # RangeIndex is unique by definition. 
- pytest.skip("Skip check for empty Index, MultiIndex, " "and RangeIndex") + pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex") idx = holder([indices[0]] * 5) assert idx.is_unique is False diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 1feb82a923b19..f246307e63e3b 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -172,7 +172,7 @@ def test_constructor_invalid(self): ) with pytest.raises(TypeError, match=msg): Float64Index(["a", "b", 0.0]) - msg = r"float\(\) argument must be a string or a number, not" " 'Timestamp'" + msg = r"float\(\) argument must be a string or a number, not 'Timestamp'" with pytest.raises(TypeError, match=msg): Float64Index([Timestamp("20130101")]) @@ -569,9 +569,7 @@ def test_take_fill_value(self): tm.assert_index_equal(result, expected) name = self._holder.__name__ - msg = ("Unable to fill values because " "{name} cannot contain NA").format( - name=name - ) + msg = "Unable to fill values because {name} cannot contain NA".format(name=name) # fill_value=True with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 0f51a6333ab2d..4544657f79af7 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -215,9 +215,7 @@ def test_ops_ndarray(self): msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'" with pytest.raises(TypeError, match=msg): td + np.array([1]) - msg = ( - r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and" " 'Timedelta'" - ) + msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'" with pytest.raises(TypeError, match=msg): np.array([1]) + td @@ -227,7 +225,7 @@ def test_ops_ndarray(self): msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'" with pytest.raises(TypeError, match=msg): td - np.array([1]) - msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and" " 'Timedelta'" + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'" with pytest.raises(TypeError, match=msg): np.array([1]) - td diff --git a/pandas/tests/indexes/timedeltas/test_formats.py b/pandas/tests/indexes/timedeltas/test_formats.py index ebc5f720d46fb..1dfc5b5305008 100644 --- a/pandas/tests/indexes/timedeltas/test_formats.py +++ b/pandas/tests/indexes/timedeltas/test_formats.py @@ -13,13 +13,11 @@ def test_representation(self, method): idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) - exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" + exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')" - exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', " "freq='D')" + exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')" - exp3 = ( - "TimedeltaIndex(['1 days', '2 days'], " "dtype='timedelta64[ns]', freq='D')" - ) + exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')" exp4 = ( "TimedeltaIndex(['1 days', '2 days', '3 days'], " @@ -47,11 +45,11 @@ def test_representation_to_series(self): exp1 = """Series([], dtype: timedelta64[ns])""" - exp2 = "0 1 days\n" "dtype: timedelta64[ns]" + exp2 = "0 1 days\ndtype: timedelta64[ns]" - exp3 = "0 1 days\n" "1 2 days\n" "dtype: timedelta64[ns]" + exp3 = "0 1 days\n1 2 days\ndtype: timedelta64[ns]" - exp4 = "0 1 days\n" "1 2 days\n" "2 3 
days\n" "dtype: timedelta64[ns]" + exp4 = "0 1 days\n1 2 days\n2 3 days\ndtype: timedelta64[ns]" exp5 = ( "0 1 days 00:00:01\n" @@ -75,15 +73,15 @@ def test_summary(self): idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) - exp1 = "TimedeltaIndex: 0 entries\n" "Freq: D" + exp1 = "TimedeltaIndex: 0 entries\nFreq: D" - exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\n" "Freq: D" + exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D" - exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\n" "Freq: D" + exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D" - exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\n" "Freq: D" + exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D" - exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days " "00:00:00" + exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00" for idx, expected in zip( [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 3549d81623e10..0dccf023c66f8 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -378,7 +378,7 @@ def test_loc_listlike_dtypes(self): exp = DataFrame({"A": [1, 1, 2], "B": [4, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "a list-indexer must only include " "values that are in the categories" + msg = "a list-indexer must only include values that are in the categories" with pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] @@ -401,7 +401,7 @@ def test_loc_listlike_dtypes(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "a list-indexer must only include values " "that are in the categories" + msg = "a list-indexer must only include values that are in the categories" with pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] @@ -431,7 +431,7 @@ def test_loc_listlike_dtypes(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "a list-indexer must only include values " "that are in the categories" + msg = "a list-indexer must only include values that are in the categories" with pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 7d47063623d87..702bf0b15dec9 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -307,7 +307,7 @@ def test_setting_with_copy_bug(self): ) mask = pd.isna(df.c) - msg = "A value is trying to be set on a copy of a slice from a" " DataFrame" + msg = "A value is trying to be set on a copy of a slice from a DataFrame" with pytest.raises(com.SettingWithCopyError, match=msg): df[["c"]][mask] = df[["b"]][mask] diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index a18f8380f80c1..dea1d5114f1b9 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -694,7 +694,7 @@ def test_where_index_datetime(self): assert obj.dtype == "datetime64[ns]" cond = pd.Index([True, False, True, False]) - msg = "Index\\(\\.\\.\\.\\) must be called with a collection " "of some kind" + msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind" with pytest.raises(TypeError, match=msg): obj.where(cond, fill_val) @@ -725,7 +725,7 @@ def 
test_where_index_datetimetz(self): assert obj.dtype == "datetime64[ns]" cond = pd.Index([True, False, True, False]) - msg = "Index\\(\\.\\.\\.\\) must be called with a collection " "of some kind" + msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind" with pytest.raises(TypeError, match=msg): obj.where(cond, fill_val) @@ -1031,7 +1031,7 @@ def test_replace_series(self, how, to_key, from_key): # TODO(jbrockmendel) commented out to only have a single xfail printed @pytest.mark.xfail( - reason="GH #18376, tzawareness-compat bug " "in BlockManager.replace_list" + reason="GH #18376, tzawareness-compat bug in BlockManager.replace_list" ) # @pytest.mark.parametrize('how', ['dict', 'series']) # @pytest.mark.parametrize('to_key', ['timedelta64[ns]', 'bool', 'object', diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 760d8c70b9434..60a6a509c0912 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -668,11 +668,11 @@ def test_iloc_mask(self): # GH 3631, iloc with a mask (of a series) should raise df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"]) mask = df.a % 2 == 0 - msg = "iLocation based boolean indexing cannot use an indexable as" " a mask" + msg = "iLocation based boolean indexing cannot use an indexable as a mask" with pytest.raises(ValueError, match=msg): df.iloc[mask] mask.index = range(len(mask)) - msg = "iLocation based boolean indexing on an integer type is not" " available" + msg = "iLocation based boolean indexing on an integer type is not available" with pytest.raises(NotImplementedError, match=msg): df.iloc[mask] @@ -693,7 +693,7 @@ def test_iloc_mask(self): ("index", ""): "0b11", ("index", ".loc"): "0b11", ("index", ".iloc"): ( - "iLocation based boolean indexing " "cannot use an indexable as a mask" + "iLocation based boolean indexing cannot use an indexable as a mask" ), ("locs", ""): "Unalignable boolean Series provided as indexer " "(index of the boolean Series and of the indexed " diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index a6e1273a229dc..e6ccee684b76b 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -147,7 +147,7 @@ def test_at_to_fail(self): s = Series([1, 2, 3], index=[3, 2, 1]) result = s.at[1] assert result == 3 - msg = "At based indexing on an integer index can only have integer" " indexers" + msg = "At based indexing on an integer index can only have integer indexers" with pytest.raises(ValueError, match=msg): s.at["a"] diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 6beb847da3eb4..2d4fb87d0c6bf 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -537,7 +537,7 @@ def test_astype(self): assert tmgr.get("e").dtype.type == t # mixed - mgr = create_mgr("a,b: object; c: bool; d: datetime;" "e: f4; f: f2; g: f8") + mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8") for t in ["float16", "float32", "float64", "int32", "int64"]: t = np.dtype(t) tmgr = mgr.astype(t, errors="ignore") @@ -599,7 +599,7 @@ def _compare(old_mgr, new_mgr): assert new_mgr.get("g").dtype == np.float64 mgr = create_mgr( - "a,b,foo: object; f: i4; bool: bool; dt: datetime;" "i: i8; g: f8; h: f2" + "a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2" ) mgr.set("a", np.array(["1"] * N, dtype=np.object_)) mgr.set("b", np.array(["2."] * N, 
dtype=np.object_)) @@ -703,7 +703,7 @@ def test_reindex_index(self): def test_reindex_items(self): # mgr is not consolidated, f8 & f8-2 blocks - mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8;" "f: bool; g: f8-2") + mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2") reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0) assert reindexed.nblocks == 2 diff --git a/pandas/tests/io/excel/test_style.py b/pandas/tests/io/excel/test_style.py index 76b27bce11b08..7ee84077a5334 100644 --- a/pandas/tests/io/excel/test_style.py +++ b/pandas/tests/io/excel/test_style.py @@ -14,7 +14,7 @@ pytest.param( "xlwt", marks=pytest.mark.xfail( - reason="xlwt does not support " "openpyxl-compatible " "style dicts" + reason="xlwt does not support openpyxl-compatible style dicts" ), ), "xlsxwriter", diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index cf26b20e5d004..0908ed885a6ca 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -172,7 +172,7 @@ def test_excel_multindex_roundtrip( with ensure_clean(ext) as pth: if c_idx_levels == 1 and c_idx_names: pytest.skip( - "Column index name cannot be " "serialized unless it's a MultiIndex" + "Column index name cannot be serialized unless it's a MultiIndex" ) # Empty name case current read in as diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py index b122e4f6c3f33..d2a2d0a6a9706 100644 --- a/pandas/tests/io/formats/test_eng_formatting.py +++ b/pandas/tests/io/formats/test_eng_formatting.py @@ -24,16 +24,12 @@ def test_eng_float_formatter(self): fmt.set_eng_float_format(use_eng_prefix=True) result = df.to_string() - expected = ( - " A\n" "0 1.410\n" "1 141.000\n" "2 14.100k\n" "3 1.410M" - ) + expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M" assert result == expected fmt.set_eng_float_format(accuracy=0) result = df.to_string() - expected = ( - " A\n" "0 1E+00\n" "1 141E+00\n" "2 14E+03\n" "3 1E+06" - ) + expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06" assert result == expected tm.reset_display_options() diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index af862b11c756c..818bbc566aca8 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -374,7 +374,7 @@ def test_repr_max_columns_max_rows(self): term_width, term_height = get_terminal_size() if term_width < 10 or term_height < 10: pytest.skip( - "terminal size too small, " "{0} x {1}".format(term_width, term_height) + "terminal size too small, {0} x {1}".format(term_width, term_height) ) def mkframe(n): @@ -1409,11 +1409,11 @@ def test_to_string_no_index(self): df_s = df.to_string(index=False) # Leading space is expected for positive numbers. 
- expected = " x y z\n" " 11 33 AAA\n" " 22 -44 " + expected = " x y z\n 11 33 AAA\n 22 -44 " assert df_s == expected df_s = df[["y", "x", "z"]].to_string(index=False) - expected = " y x z\n" " 33 11 AAA\n" "-44 22 " + expected = " y x z\n 33 11 AAA\n-44 22 " assert df_s == expected def test_to_string_line_width_no_index(self): @@ -1475,7 +1475,7 @@ def test_to_string_float_formatting(self): df = DataFrame({"x": [3234, 0.253]}) df_s = df.to_string() - expected = " x\n" "0 3234.000\n" "1 0.253" + expected = " x\n0 3234.000\n1 0.253" assert df_s == expected tm.reset_display_options() @@ -1485,9 +1485,9 @@ def test_to_string_float_formatting(self): df_s = df.to_string() if _three_digit_exp(): - expected = " x\n" "0 1.000000e+009\n" "1 2.512000e-001" + expected = " x\n0 1.000000e+009\n1 2.512000e-001" else: - expected = " x\n" "0 1.000000e+09\n" "1 2.512000e-01" + expected = " x\n0 1.000000e+09\n1 2.512000e-01" assert df_s == expected def test_to_string_float_format_no_fixed_width(self): @@ -1526,14 +1526,14 @@ def test_to_string_small_float_values(self): # but not all exactly zero df = df * 0 result = df.to_string() - expected = " 0\n" "0 0\n" "1 0\n" "2 -0" + expected = " 0\n0 0\n1 0\n2 -0" def test_to_string_float_index(self): index = Index([1.5, 2, 3, 4, 5]) df = DataFrame(np.arange(5), index=index) result = df.to_string() - expected = " 0\n" "1.5 0\n" "2.0 1\n" "3.0 2\n" "4.0 3\n" "5.0 4" + expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4" assert result == expected def test_to_string_complex_float_formatting(self): @@ -1562,7 +1562,7 @@ def test_to_string_ascii_error(self): "0 ", " .gitignore ", " 5 ", - " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80" "\xa2\xe2\x80\xa2\xe2\x80\xa2", + " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2", ) ] df = DataFrame(data) @@ -1575,7 +1575,7 @@ def test_to_string_int_formatting(self): assert issubclass(df["x"].dtype.type, np.integer) output = df.to_string() - expected = " x\n" "0 -15\n" "1 20\n" "2 25\n" "3 -35" + expected = " x\n0 -15\n1 20\n2 25\n3 -35" assert output == expected def test_to_string_index_formatter(self): @@ -1596,7 +1596,7 @@ def test_to_string_left_justify_cols(self): tm.reset_display_options() df = DataFrame({"x": [3234, 0.253]}) df_s = df.to_string(justify="left") - expected = " x \n" "0 3234.000\n" "1 0.253" + expected = " x \n0 3234.000\n1 0.253" assert df_s == expected def test_to_string_format_na(self): @@ -2077,7 +2077,7 @@ def test_to_string(self): result = cp.to_string(length=True, name=True, dtype=True) last_line = result.split("\n")[-1].strip() assert last_line == ( - "Freq: B, Name: foo, " "Length: {cp}, dtype: float64".format(cp=len(cp)) + "Freq: B, Name: foo, Length: {cp}, dtype: float64".format(cp=len(cp)) ) def test_freq_name_separation(self): @@ -2136,22 +2136,18 @@ def test_east_asian_unicode_series(self): # unicode index s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"]) - expected = ( - "あ a\nいい bb\nううう CCC\n" "ええええ D\ndtype: object" - ) + expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object" assert repr(s) == expected # unicode values s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"]) - expected = ( - "a あ\nbb いい\nc ううう\n" "ddd ええええ\ndtype: object" - ) + expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object" assert repr(s) == expected # both s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"]) expected = ( - "ああ あ\nいいいい いい\nう ううう\n" "えええ ええええ\ndtype: object" + "ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object" ) assert repr(s) == expected @@ -2181,7 
+2177,7 @@ def test_east_asian_unicode_series(self): # object dtype, shorter than unicode repr s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"]) expected = ( - "1 1\nAB 22\nNaN 3333\n" "あああ 44444\ndtype: int64" + "1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64" ) assert repr(s) == expected @@ -2559,7 +2555,7 @@ def test_format_explicit(self): exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object" assert exp == res res = repr(test_sers["twol"]) - exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:" " object" + exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object" assert exp == res res = repr(test_sers["asc"]) exp = ( diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 7bd27b2ad9be3..f2fb54796f177 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -484,12 +484,12 @@ def test_bar_align_left(self): (1, 0): [ "width: 10em", " height: 80%", - "background: linear-gradient(" "90deg,red 25.0%, transparent 25.0%)", + "background: linear-gradient(90deg,red 25.0%, transparent 25.0%)", ], (2, 0): [ "width: 10em", " height: 80%", - "background: linear-gradient(" "90deg,red 50.0%, transparent 50.0%)", + "background: linear-gradient(90deg,red 50.0%, transparent 50.0%)", ], } assert result == expected diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 7b493266144b0..c6485ff21bcfb 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -12,7 +12,7 @@ class TestToCSV: @pytest.mark.xfail( (3, 6, 5) > sys.version_info >= (3, 5), - reason=("Python csv library bug " "(see https://bugs.python.org/issue32255)"), + reason=("Python csv library bug (see https://bugs.python.org/issue32255)"), ) def test_to_csv_with_single_column(self): # see gh-18676, https://bugs.python.org/issue32255 diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 28c8837731ec1..c2753d23966c6 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -508,7 +508,7 @@ def test_convert_json_field_to_pandas_type(self, inp, exp): def test_convert_json_field_to_pandas_type_raises(self, inp): field = {"type": inp} with pytest.raises( - ValueError, match=("Unsupported or invalid field " "type: {}".format(inp)) + ValueError, match=("Unsupported or invalid field type: {}".format(inp)) ): convert_json_field_to_pandas_type(field) diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 82cd00c2d121d..412e5014c8d23 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -297,7 +297,7 @@ def test_meta_name_conflict(self): } ] - msg = r"Conflicting metadata name (foo|bar)," " need distinguishing prefix" + msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix" with pytest.raises(ValueError, match=msg): json_normalize(data, "data", meta=["foo", "bar"]) @@ -491,7 +491,7 @@ def test_json_normalize_errors(self, missing_metadata): # If meta keys are not always present a new option to set # errors='ignore' has been implemented - msg = "Try running with errors='ignore' as key 'name'" " is not always present" + msg = "Try running with errors='ignore' as key 'name' is not always present" with pytest.raises(KeyError, match=msg): json_normalize( data=missing_metadata, diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 
a0686b53b83a4..970fd465fd4ec 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1434,7 +1434,7 @@ def test_to_jsonl(self): def test_latin_encoding(self): # GH 13774 - pytest.skip("encoding not implemented in .to_json(), " "xref #13774") + pytest.skip("encoding not implemented in .to_json(), xref #13774") values = [ [b"E\xc9, 17", b"", b"a", b"b", b"c"], @@ -1589,7 +1589,7 @@ def test_index_false_error_to_json(self, orient): df = pd.DataFrame([[1, 2], [4, 5]], columns=["a", "b"]) - msg = "'index=False' is only valid when " "'orient' is 'split' or 'table'" + msg = "'index=False' is only valid when 'orient' is 'split' or 'table'" with pytest.raises(ValueError, match=msg): df.to_json(orient=orient, index=False) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 7d5bf9ec850bc..d469d3c2e51de 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1243,7 +1243,7 @@ def test_catch_too_many_names(all_parsers): 10,11,12\n""" parser = all_parsers msg = ( - "Too many columns specified: " "expected 4 and found 3" + "Too many columns specified: expected 4 and found 3" if parser.engine == "c" else "Number of passed names did not match " "number of header fields in the file" diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py index 06ae2c0fef1b9..9d0eab0b9a907 100644 --- a/pandas/tests/io/parser/test_compression.py +++ b/pandas/tests/io/parser/test_compression.py @@ -89,7 +89,7 @@ def test_compression(parser_and_data, compression_only, buffer, filename): filename = filename if filename is None else filename.format(ext=ext) if filename and buffer: - pytest.skip("Cannot deduce compression from " "buffer of compressed data.") + pytest.skip("Cannot deduce compression from buffer of compressed data.") with tm.ensure_clean(filename=filename) as path: tm.write_to_compressed(compress_type, path, data) @@ -144,7 +144,7 @@ def test_invalid_compression(all_parsers, invalid_compression): parser = all_parsers compress_kwargs = dict(compression=invalid_compression) - msg = "Unrecognized compression " "type: {compression}".format(**compress_kwargs) + msg = "Unrecognized compression type: {compression}".format(**compress_kwargs) with pytest.raises(ValueError, match=msg): parser.read_csv("test_file.zip", **compress_kwargs) diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index ff1dd10bdd0d9..99e0181741998 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -124,11 +124,11 @@ def test_header_multi_index(all_parsers): ), ( dict(index_col=[0, 1], names=["foo", "bar"]), - ("cannot specify names " "when specifying a " "multi-index header"), + ("cannot specify names when specifying a multi-index header"), ), ( dict(index_col=[0, 1], usecols=["foo", "bar"]), - ("cannot specify " "usecols when " "specifying a " "multi-index header"), + ("cannot specify usecols when specifying a multi-index header"), ), ], ) diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py index 8199d632223c1..4dfb8d3bd2dc8 100644 --- a/pandas/tests/io/parser/test_index_col.py +++ b/pandas/tests/io/parser/test_index_col.py @@ -62,9 +62,8 @@ def test_index_col_is_true(all_parsers): data = "a,b\n1,2" parser = all_parsers - with pytest.raises( - ValueError, match="The value of index_col " "couldn't be 'True'" - ): + msg = "The value of index_col couldn't 
be 'True'" + with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), index_col=True) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 99e4e5c022ecb..5d79f6e281ef1 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -560,7 +560,7 @@ def test_multiple_date_cols_with_header(all_parsers): KORD1,19990127, 19:00:00 KORD2,19990127, 20:00:00""", [[1, 2]], - ("New date column already " "in dict date_NominalTime"), + ("New date column already in dict date_NominalTime"), ), ( """\ @@ -1272,7 +1272,7 @@ def test_parse_date_time(all_parsers, data, kwargs, expected): def test_parse_date_fields(all_parsers): parser = all_parsers - data = "year,month,day,a\n2001,01,10,10.\n" "2001,02,1,11." + data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." result = parser.read_csv( StringIO(data), header=0, diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index 57096a2652b88..73638fe8ab7c8 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -56,7 +56,7 @@ def test_string_factorize(self): assert len(set(map(id, result[0]))) == 2 def test_skipinitialspace(self): - data = "a, b\n" "a, b\n" "a, b\n" "a, b" + data = "a, b\na, b\na, b\na, b" reader = TextReader(StringIO(data), skipinitialspace=True, header=None) result = reader.read() @@ -129,10 +129,10 @@ def test_integer_thousands_alt(self): def test_skip_bad_lines(self, capsys): # too many lines, see #2430 for why - data = "a:b:c\n" "d:e:f\n" "g:h:i\n" "j:k:l:m\n" "l:m:n\n" "o:p:q:r" + data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r" reader = TextReader(StringIO(data), delimiter=":", header=None) - msg = r"Error tokenizing data\. C error: Expected 3 fields in" " line 4, saw 4" + msg = r"Error tokenizing data\. C error: Expected 3 fields in line 4, saw 4" with pytest.raises(parser.ParserError, match=msg): reader.read() @@ -165,7 +165,7 @@ def test_skip_bad_lines(self, capsys): assert "Skipping line 6" in captured.err def test_header_not_enough_lines(self): - data = "skip this\n" "skip this\n" "a,b,c\n" "1,2,3\n" "4,5,6" + data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6" reader = TextReader(StringIO(data), delimiter=",", header=2) header = reader.header diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index f135fac65f56a..8bdf53c3caf61 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -95,7 +95,7 @@ def test_python_engine(self, python_engine): 1,2,3,4,""" for default in py_unsupported: - msg = "The %r option is not supported " "with the %r engine" % ( + msg = "The %r option is not supported with the %r engine" % ( default, python_engine, ) diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py index b449e848a0b5a..47c4f93fbf59c 100644 --- a/pandas/tests/io/parser/test_usecols.py +++ b/pandas/tests/io/parser/test_usecols.py @@ -18,7 +18,7 @@ "integers or a callable." 
) _msg_validate_usecols_names = ( - "Usecols do not match columns, columns " "expected but not found: {0}" + "Usecols do not match columns, columns expected but not found: {0}" ) @@ -124,7 +124,7 @@ def test_usecols_name_length_conflict(all_parsers): 10,11,12""" parser = all_parsers msg = ( - "Number of passed names did not " "match number of header fields in the file" + "Number of passed names did not match number of header fields in the file" if parser.engine == "python" else "Passed header names mismatches usecols" ) @@ -501,7 +501,7 @@ def test_incomplete_first_row(all_parsers, usecols): ), # see gh-9549 ( - ("A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n" "1,2,3,,,1,\n1,2,3\n5,6,7"), + ("A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n1,2,3,,,1,\n1,2,3\n5,6,7"), ["A", "B", "C"], dict(), DataFrame( diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py index fee7e1cb2ba5f..b9f4defb4edf8 100644 --- a/pandas/tests/io/pytables/test_pytables.py +++ b/pandas/tests/io/pytables/test_pytables.py @@ -1622,7 +1622,7 @@ def check_col(key, name, size): _maybe_remove(store, "df") store.append("df", df_new, data_columns=["A", "B", "string", "string2"]) result = store.select( - "df", "string='foo' and string2='foo'" " and A>0 and B<0" + "df", "string='foo' and string2='foo' and A>0 and B<0" ) expected = df_new[ (df_new.string == "foo") @@ -3726,7 +3726,7 @@ def test_append_to_multiple_dropna(self): tm.assert_index_equal(store.select("df1").index, store.select("df2").index) @pytest.mark.xfail( - run=False, reason="append_to_multiple_dropna_false " "is not raising as failed" + run=False, reason="append_to_multiple_dropna_false is not raising as failed" ) def test_append_to_multiple_dropna_false(self): df1 = tm.makeTimeDataFrame() @@ -3817,9 +3817,7 @@ def test_select_as_multiple(self): @pytest.mark.skipif( LooseVersion(tables.__version__) < LooseVersion("3.1.0"), - reason=( - "tables version does not support fix for nan selection " "bug: GH 4858" - ), + reason=("tables version does not support fix for nan selection bug: GH 4858"), ) def test_nan_selection_bug_4858(self): diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index fa63f102580ff..87a2405a10dd5 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -109,7 +109,7 @@ def test_unsupported_other(self): def test_rw_nthreads(self): df = pd.DataFrame({"A": np.arange(100000)}) expected_warning = ( - "the 'nthreads' keyword is deprecated, " "use 'use_threads' instead" + "the 'nthreads' keyword is deprecated, use 'use_threads' instead" ) # TODO: make the warning work with check_stacklevel=True with tm.assert_produces_warning(FutureWarning, check_stacklevel=False) as w: diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py index 6ca6da01a6d6f..52147f4e1afc7 100644 --- a/pandas/tests/io/test_gbq.py +++ b/pandas/tests/io/test_gbq.py @@ -33,9 +33,7 @@ def _skip_if_no_project_id(): def _skip_if_no_private_key_path(): if not _get_private_key_path(): - pytest.skip( - "Cannot run integration tests without a " "private key json file path" - ) + pytest.skip("Cannot run integration tests without a private key json file path") def _in_travis_environment(): diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index d3d05b6281d5b..9752b4c62aff7 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -220,9 +220,7 @@ def test_skiprows_ndarray(self): assert_framelist_equal(df1, df2) def test_skiprows_invalid(self): - with 
pytest.raises( - TypeError, match=("is not a valid type " "for skipping rows") - ): + with pytest.raises(TypeError, match=("is not a valid type for skipping rows")): self.read_html(self.spam_data, ".*Water.*", skiprows="asdf") def test_index(self): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index f3e045be2e790..6ac2e9cd65a27 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -40,13 +40,13 @@ pytest.param( "fastparquet", marks=pytest.mark.skipif( - not _HAVE_FASTPARQUET, reason="fastparquet is " "not installed" + not _HAVE_FASTPARQUET, reason="fastparquet is not installed" ), ), pytest.param( "pyarrow", marks=pytest.mark.skipif( - not _HAVE_PYARROW, reason="pyarrow is " "not installed" + not _HAVE_PYARROW, reason="pyarrow is not installed" ), ), ] diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 4fc90ea41718d..347e280234f91 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -824,7 +824,7 @@ def test_to_sql_index_label_multiindex(self): frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn) assert frame.columns[:2].tolist() == ["C", "D"] - msg = "Length of 'index_label' should match number of levels, which" " is 2" + msg = "Length of 'index_label' should match number of levels, which is 2" with pytest.raises(ValueError, match=msg): sql.to_sql( temp_frame, @@ -1408,7 +1408,7 @@ def check(col): else: raise AssertionError( - "DateCol loaded with incorrect type " "-> {0}".format(col.dtype) + "DateCol loaded with incorrect type -> {0}".format(col.dtype) ) # GH11216 @@ -2566,7 +2566,7 @@ def clean_up(test_table_to_drop): @pytest.mark.single @pytest.mark.db @pytest.mark.skip( - reason="gh-13611: there is no support for MySQL " "if SQLAlchemy is not installed" + reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed" ) class TestXMySQL(MySQLMixIn): @pytest.fixture(autouse=True, scope="class") diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 715c7e370210f..1e7d568602656 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -915,7 +915,7 @@ def test_drop_column(self): columns = ["byte_", "byte_"] read_stata(self.dta15_117, convert_dates=True, columns=columns) - msg = "The following columns were not found in the Stata data set:" " not_found" + msg = "The following columns were not found in the Stata data set: not_found" with pytest.raises(ValueError, match=msg): columns = ["byte_", "int_", "long_", "not_found"] read_stata(self.dta15_117, convert_dates=True, columns=columns) diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index ecd575020eca6..5ae29dc640dc9 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -179,7 +179,7 @@ def check_format_of_first_point(ax, expected_string): assert expected_string == ax.format_coord(first_x, first_y) except (ValueError): pytest.skip( - "skipping test because issue forming " "test comparison GH7664" + "skipping test because issue forming test comparison GH7664" ) annual = Series(1, index=date_range("2014-01-01", periods=3, freq="A-DEC")) @@ -1501,7 +1501,7 @@ def test_overlapping_datetime(self): s2.plot(ax=ax) s1.plot(ax=ax) - @pytest.mark.xfail(reason="GH9053 matplotlib does not use" " ax.xaxis.converter") + @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter") def test_add_matplotlib_datetime64(self): # GH9053 - 
ensure that a plot with PeriodConverter still understands # datetime64 data. This still fails because matplotlib overrides the diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 0215b79cb993d..65815bcedebfc 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -547,9 +547,7 @@ def test_subplots_timeseries_y_axis(self): with pytest.raises(TypeError, match=msg): testdata.plot(y="text") - @pytest.mark.xfail( - reason="not support for period, categorical, " "datetime_mixed_tz" - ) + @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz") def test_subplots_timeseries_y_axis_not_supported(self): """ This test will fail for: diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 80365e34fa87a..b6c6f967333a8 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1141,7 +1141,7 @@ def test_validation(self): validate="one_to_many", ) - msg = "Merge keys are not unique in right dataset; not a one-to-one" " merge" + msg = "Merge keys are not unique in right dataset; not a one-to-one merge" with pytest.raises(MergeError, match=msg): merge( left, @@ -1166,7 +1166,7 @@ def test_validation(self): validate="many_to_one", ) - msg = "Merge keys are not unique in left dataset; not a one-to-one" " merge" + msg = "Merge keys are not unique in left dataset; not a one-to-one merge" with pytest.raises(MergeError, match=msg): merge( left_w_dups, @@ -1182,7 +1182,7 @@ def test_validation(self): # Dups on both merge(left_w_dups, right_w_dups, on="a", validate="many_to_many") - msg = "Merge keys are not unique in right dataset; not a many-to-one" " merge" + msg = "Merge keys are not unique in right dataset; not a many-to-one merge" with pytest.raises(MergeError, match=msg): merge( left_w_dups, @@ -1192,7 +1192,7 @@ def test_validation(self): validate="many_to_one", ) - msg = "Merge keys are not unique in left dataset; not a one-to-many" " merge" + msg = "Merge keys are not unique in left dataset; not a one-to-many merge" with pytest.raises(MergeError, match=msg): merge(left_w_dups, right_w_dups, on="a", validate="one_to_many") diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 56e83ada9eb99..1b067c08d2e40 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -121,9 +121,7 @@ def test_tuple_vars_fail_with_multiindex(self): tuple_b = ("B", "b") list_b = [tuple_b] - msg = ( - r"(id|value)_vars must be a list of tuples when columns are" " a MultiIndex" - ) + msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex" for id_vars, value_vars in ( (tuple_a, list_b), (list_a, tuple_b), diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index b497f6c3aa9b4..d3300ffb01c3a 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -731,7 +731,7 @@ def test_pivot_with_list_like_values_nans(self, values, method): tm.assert_frame_equal(result, expected) @pytest.mark.xfail( - reason="MultiIndexed unstack with tuple names fails" "with KeyError GH#19966" + reason="MultiIndexed unstack with tuple names fails with KeyError GH#19966" ) @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_multiindex(self, method): @@ -880,7 +880,7 @@ def test_margins_dtype(self): tm.assert_frame_equal(expected, result) - @pytest.mark.xfail(reason="GH#17035 (len of floats is 
casted back to " "floats)") + @pytest.mark.xfail(reason="GH#17035 (len of floats is casted back to floats)") def test_margins_dtype_len(self): mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")] mi = MultiIndex.from_tuples(mi_val, names=("A", "B")) @@ -1575,7 +1575,7 @@ def test_pivot_table_margins_name_with_aggfunc_list(self): expected = pd.DataFrame(table.values, index=ix, columns=cols) tm.assert_frame_equal(table, expected) - @pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to " "ints)") + @pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to ints)") def test_categorical_margins(self, observed): # GH 10989 df = pd.DataFrame( @@ -1589,7 +1589,7 @@ def test_categorical_margins(self, observed): table = df.pivot_table("x", "y", "z", dropna=observed, margins=True) tm.assert_frame_equal(table, expected) - @pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to " "ints)") + @pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to ints)") def test_categorical_margins_category(self, observed): df = pd.DataFrame( {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2} diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py index ee0ff87e31aea..4cff061cabc40 100644 --- a/pandas/tests/scalar/period/test_asfreq.py +++ b/pandas/tests/scalar/period/test_asfreq.py @@ -31,7 +31,7 @@ def test_asfreq_near_zero_weekly(self): assert week2.asfreq("D", "S") <= per2 @pytest.mark.xfail( - reason="GH#19643 period_helper asfreq functions fail " "to check for overflows" + reason="GH#19643 period_helper asfreq functions fail to check for overflows" ) def test_to_timestamp_out_of_bounds(self): # GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848') diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 34d2fa6a9194c..771a67dfceaa8 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -390,11 +390,11 @@ def test_period_cons_mult(self): assert result.freq == p1.freq assert result.freqstr == "3M" - msg = "Frequency must be positive, because it" " represents span: -3M" + msg = "Frequency must be positive, because it represents span: -3M" with pytest.raises(ValueError, match=msg): Period("2011-01", freq="-3M") - msg = "Frequency must be positive, because it" " represents span: 0M" + msg = "Frequency must be positive, because it represents span: 0M" with pytest.raises(ValueError, match=msg): Period("2011-01", freq="0M") @@ -445,7 +445,7 @@ def test_period_cons_combined(self): assert result.freq == p2.freq assert result.freqstr == "25H" - msg = "Frequency must be positive, because it" " represents span: -25H" + msg = "Frequency must be positive, because it represents span: -25H" with pytest.raises(ValueError, match=msg): Period("2011-01", freq="-1D1H") with pytest.raises(ValueError, match=msg): @@ -455,7 +455,7 @@ def test_period_cons_combined(self): with pytest.raises(ValueError, match=msg): Period(ordinal=1, freq="-1H1D") - msg = "Frequency must be positive, because it" " represents span: 0D" + msg = "Frequency must be positive, because it represents span: 0D" with pytest.raises(ValueError, match=msg): Period("2011-01", freq="0D0H") with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/scalar/timedelta/test_construction.py b/pandas/tests/scalar/timedelta/test_construction.py index 9917e8bc4c9ac..ae1e84576c092 100644 --- 
a/pandas/tests/scalar/timedelta/test_construction.py +++ b/pandas/tests/scalar/timedelta/test_construction.py @@ -239,9 +239,8 @@ def test_iso_constructor(fmt, exp): ], ) def test_iso_constructor_raises(fmt): - with pytest.raises( - ValueError, match=("Invalid ISO 8601 Duration " "format - {}".format(fmt)) - ): + msg = "Invalid ISO 8601 Duration format - {}".format(fmt) + with pytest.raises(ValueError, match=msg): Timedelta(fmt) diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py index f64cf97acf805..424b0c9abdef8 100644 --- a/pandas/tests/scalar/timestamp/test_timezones.py +++ b/pandas/tests/scalar/timestamp/test_timezones.py @@ -63,11 +63,11 @@ def test_tz_localize_ambiguous(self): ts.tz_localize("US/Eastern", ambiguous="infer") # GH#8025 - msg = "Cannot localize tz-aware Timestamp, " "use tz_convert for conversions" + msg = "Cannot localize tz-aware Timestamp, use tz_convert for conversions" with pytest.raises(TypeError, match=msg): Timestamp("2011-01-01", tz="US/Eastern").tz_localize("Asia/Tokyo") - msg = "Cannot convert tz-naive Timestamp, " "use tz_localize to localize" + msg = "Cannot convert tz-naive Timestamp, use tz_localize to localize" with pytest.raises(TypeError, match=msg): Timestamp("2011-01-01").tz_convert("Asia/Tokyo") diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py index 31a1f43470f2c..c93a000f5e7ce 100644 --- a/pandas/tests/series/indexing/test_alter_index.py +++ b/pandas/tests/series/indexing/test_alter_index.py @@ -480,7 +480,7 @@ def test_rename(): @pytest.mark.parametrize( - "data, index, drop_labels," " axis, expected_data, expected_index", + "data, index, drop_labels, axis, expected_data, expected_index", [ # Unique Index ([1, 2], ["one", "two"], ["two"], 0, [1], ["one"]), @@ -503,7 +503,7 @@ def test_drop_unique_and_non_unique_index( @pytest.mark.parametrize( - "data, index, drop_labels," " axis, error_type, error_desc", + "data, index, drop_labels, axis, error_type, error_desc", [ # single string/tuple-like (range(3), list("abc"), "bc", 0, KeyError, "not found in axis"), diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 9b76ed026e580..01b4a3c84a565 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -353,7 +353,7 @@ def test_where_setitem_invalid(): # GH 2702 # make sure correct exceptions are raised on invalid list assignment - msg = "cannot set using a {} indexer with a different length than" " the value" + msg = "cannot set using a {} indexer with a different length than the value" # slice s = Series(list("abc")) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 89b411a284563..67373686d6728 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -229,7 +229,7 @@ def test_cummax_timedelta64(self): tm.assert_series_equal(expected, result) def test_npdiff(self): - pytest.skip("skipping due to Series no longer being an " "ndarray") + pytest.skip("skipping due to Series no longer being an ndarray") # no longer works as the return type of np.diff is now nd.array s = Series(np.arange(5)) @@ -407,9 +407,7 @@ def test_corr_invalid_method(self): # GH PR #22298 s1 = pd.Series(np.random.randn(10)) s2 = pd.Series(np.random.randn(10)) - msg = ( - "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " - ) + msg = "method 
must be either 'pearson', 'spearman', 'kendall', or a callable, " with pytest.raises(ValueError, match=msg): s1.corr(s2, method="____") @@ -820,7 +818,7 @@ def test_ptp(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): tm.assert_series_equal(s.ptp(level=0, skipna=False), expected) - msg = "No axis named 1 for object type" " <class 'pandas.core.series.Series'>" + msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" with pytest.raises(ValueError, match=msg): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s.ptp(axis=1) @@ -1295,7 +1293,7 @@ class TestNLargestNSmallest: ) def test_error(self, r): dt = r.dtype - msg = "Cannot use method 'n(larg|small)est' with " "dtype {dt}".format(dt=dt) + msg = "Cannot use method 'n(larg|small)est' with dtype {dt}".format(dt=dt) args = 2, len(r), 0, -1 methods = r.nlargest, r.nsmallest for method, arg in product(methods, args): diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 6012f3986e955..f8a44b7f5639e 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -813,7 +813,7 @@ def test_dropna_empty(self): assert len(s) == 0 # invalid axis - msg = "No axis named 1 for object type" " <class 'pandas.core.series.Series'>" + msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" with pytest.raises(ValueError, match=msg): s.dropna(axis=1) @@ -1117,9 +1117,7 @@ def test_interpolate_time_raises_for_non_timeseries(self): # When method='time' is used on a non-TimeSeries that contains a null # value, a ValueError should be raised. non_ts = Series([0, 1, 2, np.NaN]) - msg = ( - "time-weighted interpolation only works on Series.* " "with a DatetimeIndex" - ) + msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex" with pytest.raises(ValueError, match=msg): non_ts.interpolate(method="time") @@ -1417,9 +1415,7 @@ def test_interp_limit_area(self): ) # raises an error even if limit type is wrong. - msg = ( - r"Invalid limit_area: expecting one of \['inside', 'outside'\]," " got abc" - ) + msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc" with pytest.raises(ValueError, match=msg): s.interpolate(method="linear", limit_area="abc") @@ -1668,5 +1664,5 @@ def test_interpolate_timedelta_index(self, interp_methods_ind): assert_series_equal(result, expected) else: pytest.skip( - "This interpolation method is not supported for " "Timedelta Index yet." + "This interpolation method is not supported for Timedelta Index yet." 
) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index aada5cca9fdc7..0c25df7997469 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -194,7 +194,7 @@ def test_logical_ops_with_index(self, op): pytest.param( ops.rand_, marks=pytest.mark.xfail( - reason="GH#22092 Index " "implementation returns " "Index", + reason="GH#22092 Index implementation returns Index", raises=AssertionError, strict=True, ), @@ -202,7 +202,7 @@ def test_logical_ops_with_index(self, op): pytest.param( ops.ror_, marks=pytest.mark.xfail( - reason="Index.get_indexer " "with non unique index", + reason="Index.get_indexer with non unique index", raises=InvalidIndexError, strict=True, ), diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 0ae2194543b44..125f516ab6b09 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -106,7 +106,7 @@ def test_sort_index(self): sorted_series = random_order.sort_index(axis=0) assert_series_equal(sorted_series, self.ts) - msg = "No axis named 1 for object type" " <class 'pandas.core.series.Series'>" + msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" with pytest.raises(ValueError, match=msg): random_order.sort_values(axis=1) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 6be1b9a9143bf..d0ca5d82c6b33 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -120,9 +120,7 @@ def test_shift(self): # incompat tz s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo") - msg = ( - "DatetimeArray subtraction must have the same timezones or no" " timezones" - ) + msg = "DatetimeArray subtraction must have the same timezones or no timezones" with pytest.raises(TypeError, match=msg): s - s2 @@ -915,7 +913,7 @@ def test_between_time_axis(self): assert len(ts.between_time(stime, etime)) == expected_length assert len(ts.between_time(stime, etime, axis=0)) == expected_length - msg = "No axis named 1 for object type" " <class 'pandas.core.series.Series'>" + msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" with pytest.raises(ValueError, match=msg): ts.between_time(stime, etime, axis=1) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 96e3c4640d2f6..6527d41eac841 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -1357,9 +1357,7 @@ def test_as_blocks(self): assert list(df_blocks.keys()) == ["Sparse[float64, nan]"] tm.assert_frame_equal(df_blocks["Sparse[float64, nan]"], df) - @pytest.mark.xfail( - reason="nan column names in _init_dict problematic " "(GH#16894)" - ) + @pytest.mark.xfail(reason="nan column names in _init_dict problematic (GH#16894)") def test_nan_columnname(self): # GH 8822 nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan]) diff --git a/pandas/tests/sparse/series/test_indexing.py b/pandas/tests/sparse/series/test_indexing.py index 525b0487a9376..c75f3b2134f91 100644 --- a/pandas/tests/sparse/series/test_indexing.py +++ b/pandas/tests/sparse/series/test_indexing.py @@ -62,7 +62,7 @@ def test_where_with_numeric_data(data): ], ) @pytest.mark.parametrize("other", [True, -100, 0.1, 100.0 + 100.0j]) -@pytest.mark.skip(reason="Wrong SparseBlock initialization " "(Segfault) " "(GH 17386)") +@pytest.mark.skip(reason="Wrong SparseBlock initialization 
(Segfault) (GH 17386)") def test_where_with_numeric_data_and_other(data, other): # GH 17386 lower_bound = 1.5 @@ -96,7 +96,7 @@ def test_where_with_bool_data(): @pytest.mark.parametrize("other", [True, 0, 0.1, 100.0 + 100.0j]) -@pytest.mark.skip(reason="Wrong SparseBlock initialization " "(Segfault) " "(GH 17386)") +@pytest.mark.skip(reason="Wrong SparseBlock initialization (Segfault) (GH 17386)") def test_where_with_bool_data_and_other(other): # GH 17386 data = [False, False, True, True, False, False] diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 5619a0a11fb11..eb217283c7a83 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -1194,7 +1194,7 @@ def test_to_coo_bad_ilevel(self): def test_to_coo_duplicate_index_entries(self): ss = pd.concat([self.sparse_series[0], self.sparse_series[0]]).to_sparse() - msg = "Duplicate index entries are not allowed in to_coo" " transformation" + msg = "Duplicate index entries are not allowed in to_coo transformation" with pytest.raises(ValueError, match=msg): ss.to_coo(["A", "B"], ["C", "D"]) diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py index 5cfacaf16cffe..ea5e939b57566 100644 --- a/pandas/tests/sparse/test_indexing.py +++ b/pandas/tests/sparse/test_indexing.py @@ -441,7 +441,7 @@ def tests_indexing_with_sparse(self, kind, fill): tm.assert_sp_series_equal(s[indexer], expected) tm.assert_sp_series_equal(s.loc[indexer], expected) - msg = "iLocation based boolean indexing cannot " "use an indexable as a mask" + msg = "iLocation based boolean indexing cannot use an indexable as a mask" with pytest.raises(ValueError, match=msg): s.iloc[indexer] diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index a76f2bb04a542..c97c69c323b56 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1819,9 +1819,8 @@ def test_reset_index_multiindex_columns(self): tm.assert_frame_equal(result, df) # gh-16120: already existing column - with pytest.raises( - ValueError, match=(r"cannot insert \('A', ''\), " "already exists") - ): + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): df.rename_axis("A").reset_index() # gh-16164: multiindex (tuple) full key @@ -1837,9 +1836,8 @@ def test_reset_index_multiindex_columns(self): tm.assert_frame_equal(result, expected) # with index name which is a too long tuple... - with pytest.raises( - ValueError, match=("Item must have length equal " "to number of levels.") - ): + msg = "Item must have length equal to number of levels." + with pytest.raises(ValueError, match=msg): df.rename_axis([("C", "c", "i")]).reset_index() # or too short... 
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 6833757c69eaa..950d6a9595f9e 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -196,10 +196,8 @@ def test_api(self): def test_api_mi_raises(self): # GH 23679 mi = MultiIndex.from_arrays([["a", "b", "c"]]) - with pytest.raises( - AttributeError, - match="Can only use .str accessor " "with Index, not MultiIndex", - ): + msg = "Can only use .str accessor with Index, not MultiIndex" + with pytest.raises(AttributeError, match=msg): mi.str assert not hasattr(mi, "str") @@ -232,10 +230,8 @@ def test_api_per_dtype(self, box, dtype, any_skipna_inferred_dtype): assert isinstance(t.str, strings.StringMethods) else: # GH 9184, GH 23011, GH 23163 - with pytest.raises( - AttributeError, - match="Can only use .str " "accessor with string values.*", - ): + msg = "Can only use .str accessor with string values.*" + with pytest.raises(AttributeError, match=msg): t.str assert not hasattr(t, "str") @@ -1101,7 +1097,7 @@ def test_replace_literal(self): with pytest.raises(ValueError, match=msg): values.str.replace("abc", callable_repl, regex=False) - msg = "Cannot use a compiled regex as replacement pattern with" " regex=False" + msg = "Cannot use a compiled regex as replacement pattern with regex=False" with pytest.raises(ValueError, match=msg): values.str.replace(compiled_pat, "", regex=False) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 27700d778df19..2df5460a05953 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -844,7 +844,7 @@ def test_numpy_compat(self, method): pytest.param( "ls", marks=pytest.mark.xfail( - reason="GH#16425 expanding with " "offset not supported" + reason="GH#16425 expanding with offset not supported" ), ), ], @@ -1775,9 +1775,8 @@ def test_invalid_quantile_value(self): data = np.arange(5) s = Series(data) - with pytest.raises( - ValueError, match="Interpolation 'invalid'" " is not supported" - ): + msg = "Interpolation 'invalid' is not supported" + with pytest.raises(ValueError, match=msg): s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid") def test_rolling_quantile_param(self): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 822e97b21f0da..2654d83ee0c52 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -843,7 +843,7 @@ def test_apply_large_n(self): assert rs == xp def test_apply_corner(self): - msg = "Only know how to combine business day with datetime or" " timedelta" + msg = "Only know how to combine business day with datetime or timedelta" with pytest.raises(ApplyTypeError, match=msg): BDay().apply(BMonthEnd()) diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index 700fee2d89f3c..126a1bd12ad59 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -38,11 +38,9 @@ def test_parse_time_quarter_with_dash(dashed, normal): @pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"]) def test_parse_time_quarter_with_dash_error(dashed): - msg = "Unknown datetime string format, " "unable to parse: {dashed}".format( - dashed=dashed - ) + msg = "Unknown datetime string format, unable to parse: {dashed}" - with pytest.raises(parsing.DateParseError, match=msg): + with pytest.raises(parsing.DateParseError, match=msg.format(dashed=dashed)): parse_time_string(dashed) @@ -113,14 +111,12 @@ def 
test_parsers_quarter_invalid(date_str): if date_str == "6Q-20": msg = ( "Incorrect quarterly string is given, quarter " - "must be between 1 and 4: {date_str}".format(date_str=date_str) + "must be between 1 and 4: {date_str}" ) else: - msg = "Unknown datetime string format, unable " "to parse: {date_str}".format( - date_str=date_str - ) + msg = "Unknown datetime string format, unable to parse: {date_str}" - with pytest.raises(ValueError, match=msg): + with pytest.raises(ValueError, match=msg.format(date_str=date_str)): parsing.parse_time_string(date_str) diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py index 1583420053fde..5a677d629e72d 100644 --- a/pandas/tests/util/test_assert_almost_equal.py +++ b/pandas/tests/util/test_assert_almost_equal.py @@ -40,7 +40,7 @@ def _assert_not_almost_equal(a, b, **kwargs): try: assert_almost_equal(a, b, **kwargs) msg = ( - "{a} and {b} were approximately equal " "when they shouldn't have been" + "{a} and {b} were approximately equal when they shouldn't have been" ).format(a=a, b=b) pytest.fail(msg=msg) except AssertionError: diff --git a/pandas/tests/util/test_deprecate.py b/pandas/tests/util/test_deprecate.py index e7b38bb2b700a..8fbc8037ed7c5 100644 --- a/pandas/tests/util/test_deprecate.py +++ b/pandas/tests/util/test_deprecate.py @@ -57,9 +57,8 @@ def test_deprecate_no_docstring(): def test_deprecate_wrong_docstring(): - with pytest.raises( - AssertionError, match="deprecate needs a correctly " "formatted docstring" - ): + msg = "deprecate needs a correctly formatted docstring" + with pytest.raises(AssertionError, match=msg): deprecate( "depr_func", new_func_wrong_docstring, "1.0", msg="Use new_func instead." )
https://api.github.com/repos/pandas-dev/pandas/pulls/27281
2019-07-07T19:16:47Z
2019-07-08T01:09:00Z
2019-07-08T01:09:00Z
2019-07-08T06:47:14Z
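An aside on the style cleanup merged above: joining adjacent string literals is not just cosmetic. The sketch below is illustrative only (not taken from the PR) and shows the failure mode that implicit concatenation invites, which is why linters flag it.

```python
# A missing comma between adjacent string literals is legal Python:
# the two literals silently merge into one element, so this is a
# data bug rather than a SyntaxError.
columns = [
    "year",
    "month"  # <- comma forgotten here
    "day",
]
assert columns == ["year", "monthday"]

# The cleanup above instead collapses fragments that were always
# meant to be one string into a single literal, e.g.:
msg = "Too many columns specified: expected 4 and found 3"
```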
DOC: cleanup docstring for read_json and fix error in contribution guide
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 92d7cf1a79d8c..80dc8b0d8782b 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -288,7 +288,7 @@ complex changes to the documentation as well. Some other important things to know about the docs: * The *pandas* documentation consists of two parts: the docstrings in the code - itself and the docs in this folder ``pandas/doc/``. + itself and the docs in this folder ``doc/``. The docstrings provide a clear explanation of the usage of the individual functions, while the documentation in this folder consists of tutorial-like @@ -404,11 +404,11 @@ Building the documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~ So how do you build the docs? Navigate to your local -``pandas/doc/`` directory in the console and run:: +``doc/`` directory in the console and run:: python make.py html -Then you can find the HTML output in the folder ``pandas/doc/build/html/``. +Then you can find the HTML output in the folder ``doc/build/html/``. The first time you build the docs, it will take quite a while because it has to run all the code examples and build all the generated docstring pages. In subsequent @@ -448,7 +448,7 @@ You can also specify to use multiple cores to speed up the documentation build:: Open the following file in a web browser to see the full documentation you just built:: - pandas/docs/build/html/index.html + doc/build/html/index.html And you'll have the satisfaction of seeing your new and improved documentation! diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 24d41b5101a77..ada7e6f43125d 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -400,8 +400,10 @@ def read_json( .. versionadded:: 0.23.0 'table' as an allowed value for the ``orient`` argument - typ : type of object to recover (series or frame), default 'frame' - dtype : boolean or dict, default None + typ : {'frame', 'series'}, default 'frame' + The type of object to recover. + + dtype : bool or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. @@ -411,7 +413,7 @@ def read_json( Not applicable for ``orient='table'``. - convert_axes : boolean, default None + convert_axes : bool, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. @@ -420,9 +422,9 @@ def read_json( Not applicable for ``orient='table'``. - convert_dates : boolean, default True - List of columns to parse for dates; If True, then try to parse - datelike columns default is True; a column label is datelike if + convert_dates : bool or list of str, default True + List of columns to parse for dates. If True, then try to parse + datelike columns. A column label is datelike if * it ends with ``'_at'``, @@ -432,34 +434,38 @@ def read_json( * it is ``'modified'``, or - * it is ``'date'`` + * it is ``'date'``. + + keep_default_dates : bool, default True + If parsing dates, then parse the default datelike columns. - keep_default_dates : boolean, default True - If parsing dates, then parse the default datelike columns - numpy : boolean, default False + numpy : bool, default False Direct decoding to numpy arrays. Supports numeric data only, but non-numeric column and index labels are supported. Note also that the JSON ordering MUST be the same for each term if numpy=True. 
- precise_float : boolean, default False + + precise_float : bool, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but - less precise builtin functionality - date_unit : string, default None + less precise builtin functionality. + + date_unit : str, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. + encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. .. versionadded:: 0.19.0 - lines : boolean, default False + lines : bool, default False Read the file as a json object per line. .. versionadded:: 0.19.0 - chunksize : integer, default None + chunksize : int, optional Return JsonReader object for iteration. See the `line-delimited json docs <http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_ @@ -480,11 +486,13 @@ def read_json( Returns ------- - result : Series or DataFrame, depending on the value of `typ`. + Series or DataFrame + The type returned depends on the value of `typ`. See Also -------- - DataFrame.to_json + DataFrame.to_json : Convert a DataFrame to a JSON string. + Series.to_json : Convert a Series to a JSON string. Notes -----
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- Contribution Guide
  - fix incorrect paths to doc folder
- read_json()
  - add period to end of sentences
  - conform parameter types to project standard
  - add newlines for readability and consistency
  - for convert_dates parameter, fix typos and malformed description and add list as accepted type
  - make Returns section conform to project standard
https://api.github.com/repos/pandas-dev/pandas/pulls/27280
2019-07-07T18:54:53Z
2019-07-17T20:33:49Z
2019-07-17T20:33:49Z
2019-07-17T20:33:53Z
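For context on the parameters whose docstrings were cleaned up above, here is a minimal usage sketch. The JSON payload is invented for illustration; the behavior shown follows the revised docstring.

```python
from io import StringIO

import pandas as pd

# 'created_at' is datelike by the docstring's rules (the label ends
# with '_at'), so convert_dates=True parses it to datetime64[ns].
data = '{"created_at": {"0": "2019-07-07", "1": "2019-07-08"}, "n": {"0": 1, "1": 2}}'
df = pd.read_json(StringIO(data), convert_dates=True)
print(df.dtypes)  # created_at -> datetime64[ns], n -> int64

# typ='series' recovers a Series instead of a DataFrame.
s = pd.read_json(StringIO('{"a": 1, "b": 2}'), typ="series")
```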
DOC: Explicitly include "private" ExtensionArray methods in API docs
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index 34f76642119c8..407aab4bb1f1b 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -18,10 +18,44 @@ objects. api.extensions.register_series_accessor api.extensions.register_index_accessor api.extensions.ExtensionDtype - api.extensions.ExtensionArray .. autosummary:: :toctree: api/ :template: autosummary/class_without_autosummary.rst + api.extensions.ExtensionArray arrays.PandasArray + +.. We need this autosummary so that methods and attributes are generated. +.. Separate block, since they aren't classes. + + .. autosummary:: + :toctree: api/ + + api.extensions.ExtensionArray._concat_same_type + api.extensions.ExtensionArray._formatter + api.extensions.ExtensionArray._formatting_values + api.extensions.ExtensionArray._from_factorized + api.extensions.ExtensionArray._from_sequence + api.extensions.ExtensionArray._from_sequence_of_strings + api.extensions.ExtensionArray._ndarray_values + api.extensions.ExtensionArray._reduce + api.extensions.ExtensionArray._values_for_argsort + api.extensions.ExtensionArray._values_for_factorize + api.extensions.ExtensionArray.argsort + api.extensions.ExtensionArray.astype + api.extensions.ExtensionArray.copy + api.extensions.ExtensionArray.dropna + api.extensions.ExtensionArray.factorize + api.extensions.ExtensionArray.fillna + api.extensions.ExtensionArray.isna + api.extensions.ExtensionArray.ravel + api.extensions.ExtensionArray.repeat + api.extensions.ExtensionArray.searchsorted + api.extensions.ExtensionArray.shift + api.extensions.ExtensionArray.take + api.extensions.ExtensionArray.unique + api.extensions.ExtensionArray.dtype + api.extensions.ExtensionArray.nbytes + api.extensions.ExtensionArray.ndim + api.extensions.ExtensionArray.shape diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 2a5556ff6d357..ee796f9896b52 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -39,6 +39,39 @@ class ExtensionArray: .. versionadded:: 0.23.0 + Attributes + ---------- + dtype + nbytes + ndim + shape + + Methods + ------- + argsort + astype + copy + dropna + factorize + fillna + isna + ravel + repeat + searchsorted + shift + take + unique + _concat_same_type + _formatter + _formatting_values + _from_factorized + _from_sequence + _from_sequence_of_strings + _ndarray_values + _reduce + _values_for_argsort + _values_for_factorize + Notes ----- The interface includes the following abstract methods that must be @@ -170,7 +203,6 @@ def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): Returns ------- ExtensionArray - """ raise AbstractMethodError(cls) @@ -188,7 +220,7 @@ def _from_factorized(cls, values, original): See Also -------- - pandas.factorize + factorize ExtensionArray.factorize """ raise AbstractMethodError(cls) @@ -654,7 +686,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ABCExtensionArra See Also -------- - pandas.factorize : Top-level factorize method that dispatches here. + factorize : Top-level factorize method that dispatches here. Notes ----- @@ -778,6 +810,11 @@ def take( When `indices` contains negative values other than ``-1`` and `allow_fill` is True. + See Also + -------- + numpy.take + api.extensions.take + Notes ----- ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, @@ -785,11 +822,6 @@ def take( it's called by :meth:`Series.reindex`, or any other method that causes realignment, with a `fill_value`. 
- See Also - -------- - numpy.take - pandas.api.extensions.take - Examples -------- Here's an example implementation, which relies on casting the @@ -862,7 +894,7 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]: Parameters ---------- - boxed: bool, default False + boxed : bool, default False An indicated for whether or not your array is being printed within a Series, DataFrame, or Index (True), or just by itself (False). This may be useful if you want scalar values @@ -889,6 +921,10 @@ def _formatting_values(self) -> np.ndarray: .. deprecated:: 0.24.0 Use :meth:`ExtensionArray._formatter` instead. + + Returns + ------- + array : ndarray """ return np.array(self) @@ -904,6 +940,10 @@ def ravel(self, order="C") -> ABCExtensionArray: ---------- order : {None, 'C', 'F', 'A', 'K'}, default 'C' + Returns + ------- + ExtensionArray + Notes ----- - Because ExtensionArrays are 1D-only, this is a no-op. @@ -944,6 +984,10 @@ def _ndarray_values(self) -> np.ndarray: The expectation is that this is cheap to compute, and is primarily used for interacting with our indexers. + + Returns + ------- + array : ndarray """ return np.array(self) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 9550d68f1d32b..ab2cd67e058b9 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -306,12 +306,12 @@ def _is_dtype_compat(self, other): def equals(self, other): """ - Determine if two CategorialIndex objects contain the same elements. + Determine if two CategoricalIndex objects contain the same elements. Returns ------- bool - If two CategorialIndex objects have equal elements True, + If two CategoricalIndex objects have equal elements True, otherwise False. """ if self.is_(other):
- [x] closes #24067 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
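For readers browsing the newly generated pages, here is a quick way to poke at the attributes now listed in the class docstring (`dtype`, `nbytes`, `ndim`, `shape`). This is only an illustration using the built-in nullable integer extension array as a stand-in, not part of the diff:

```python
import pandas as pd

# Any concrete ExtensionArray exposes the attributes now documented;
# the nullable integer array is used here purely as an example.
arr = pd.array([1, 2, None], dtype="Int64")

print(arr.dtype)   # Int64
print(arr.ndim)    # 1 -- ExtensionArrays are always one-dimensional
print(arr.shape)   # (3,)
print(arr.nbytes)  # size in bytes of the underlying data
print(arr.isna())  # array([False, False,  True])
```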
https://api.github.com/repos/pandas-dev/pandas/pulls/27279
2019-07-07T18:02:22Z
2019-07-15T17:11:32Z
2019-07-15T17:11:32Z
2019-07-16T22:06:38Z
BUG: Fix+test division by negative zero
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index c999c4db232e6..867122964fe59 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -1,5 +1,4 @@ import numbers -import sys from typing import Type import warnings @@ -675,7 +674,7 @@ def _maybe_mask_result(self, result, mask, other, op_name): # a float result # or our op is a divide if (is_float_dtype(other) or is_float(other)) or ( - op_name in ["rtruediv", "truediv", "rdiv", "div"] + op_name in ["rtruediv", "truediv"] ): result[mask] = np.nan return result @@ -747,8 +746,6 @@ def integer_arithmetic_method(self, other): IntegerArray._add_comparison_ops() -module = sys.modules[__name__] - _dtype_docstring = """ An ExtensionDtype for {dtype} integer data. diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 4ca1861baf237..608c2550994f1 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -120,9 +120,13 @@ def mask_zero_div_zero(x, y, result, copy=False): if zmask.any(): shape = result.shape + # Flip sign if necessary for -0.0 + zneg_mask = zmask & np.signbit(y) + zpos_mask = zmask & ~zneg_mask + nan_mask = (zmask & (x == 0)).ravel() - neginf_mask = (zmask & (x < 0)).ravel() - posinf_mask = (zmask & (x > 0)).ravel() + neginf_mask = ((zpos_mask & (x < 0)) | (zneg_mask & (x > 0))).ravel() + posinf_mask = ((zpos_mask & (x > 0)) | (zneg_mask & (x < 0))).ravel() if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index c67a67bb31d62..f047154f2c636 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -30,8 +30,12 @@ def one(request): for box_cls in [pd.Index, np.array] for dtype in [np.int64, np.uint64, np.float64] ] +zeros.extend( + [box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [pd.Index, np.array]] +) zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]]) -zeros.extend([0, 0.0]) +zeros.extend([np.array(-0.0, dtype=np.float64)]) +zeros.extend([0, 0.0, -0.0]) @pytest.fixture(params=zeros) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 8179ab08895da..1fbecbab469e4 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -14,6 +14,22 @@ from pandas.core import ops import pandas.util.testing as tm + +def adjust_negative_zero(zero, expected): + """ + Helper to adjust the expected result if we are dividing by -0.0 + as opposed to 0.0 + """ + if np.signbit(np.array(zero)).any(): + # All entries in the `zero` fixture should be either + # all-negative or no-negative. + assert np.signbit(np.array(zero)).all() + + expected *= -1 + + return expected + + # ------------------------------------------------------------------ # Comparisons @@ -229,20 +245,27 @@ def test_div_zero(self, zero, numeric_idx): idx = numeric_idx expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + # We only adjust for Index, because Series does not yet apply + # the adjustment correctly. 
+ expected2 = adjust_negative_zero(zero, expected) + result = idx / zero - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected2) ser_compat = Series(idx).astype("i8") / np.array(zero).astype("i8") - tm.assert_series_equal(ser_compat, Series(result)) + tm.assert_series_equal(ser_compat, Series(expected)) def test_floordiv_zero(self, zero, numeric_idx): idx = numeric_idx expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + # We only adjust for Index, because Series does not yet apply + # the adjustment correctly. + expected2 = adjust_negative_zero(zero, expected) result = idx // zero - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected2) ser_compat = Series(idx).astype("i8") // np.array(zero).astype("i8") - tm.assert_series_equal(ser_compat, Series(result)) + tm.assert_series_equal(ser_compat, Series(expected)) def test_mod_zero(self, zero, numeric_idx): idx = numeric_idx @@ -258,11 +281,27 @@ def test_divmod_zero(self, zero, numeric_idx): exleft = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) exright = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64) + exleft = adjust_negative_zero(zero, exleft) result = divmod(idx, zero) tm.assert_index_equal(result[0], exleft) tm.assert_index_equal(result[1], exright) + @pytest.mark.parametrize("op", [operator.truediv, operator.floordiv]) + def test_div_negative_zero(self, zero, numeric_idx, op): + # Check that -1 / -0.0 returns np.inf, not -np.inf + if isinstance(numeric_idx, pd.UInt64Index): + return + idx = numeric_idx - 3 + + expected = pd.Index( + [-np.inf, -np.inf, -np.inf, np.nan, np.inf], dtype=np.float64 + ) + expected = adjust_negative_zero(zero, expected) + + result = op(idx, zero) + tm.assert_index_equal(result, expected) + # ------------------------------------------------------------------ @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) @@ -896,6 +935,26 @@ def check(series, other): check(tser, tser[::2]) check(tser, 5) + @pytest.mark.xfail( + reason="Series division does not yet fill 1/0 consistently; Index does." + ) + def test_series_divmod_zero(self): + # Check that divmod uses pandas convention for division by zero, + # which does not match numpy. 
+ # pandas convention has + # 1/0 == np.inf + # -1/0 == -np.inf + # 1/-0.0 == -np.inf + # -1/-0.0 == np.inf + tser = tm.makeTimeSeries().rename("ts") + other = tser * 0 + + result = divmod(tser, other) + exp1 = pd.Series([np.inf] * len(tser), index=tser.index) + exp2 = pd.Series([np.nan] * len(tser), index=tser.index) + tm.assert_series_equal(result[0], exp1) + tm.assert_series_equal(result[1], exp2) + class TestUFuncCompat: @pytest.mark.parametrize( diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py index b9f4defb4edf8..fb87749ea62e0 100644 --- a/pandas/tests/io/pytables/test_pytables.py +++ b/pandas/tests/io/pytables/test_pytables.py @@ -4337,6 +4337,7 @@ def test_store_datetime_mixed(self): df["d"] = ts.index[:3] self._check_roundtrip(df, tm.assert_frame_equal) + # FIXME: don't leave commented-out code # def test_cant_write_multiindex_table(self): # # for now, #1848 # df = DataFrame(np.random.randn(10, 4), diff --git a/setup.cfg b/setup.cfg index fee0ab60f25b5..7549bfe2e325d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -84,6 +84,8 @@ plugins = Cython.Coverage [coverage:report] ignore_errors = False show_missing = True +omit = + pandas/_version.py # Regexes for lines to exclude from consideration exclude_lines = # Have to re-enable the standard pragma
Discovered while trying to reconcile Series vs Index arithmetic behavior. A couple of other small misc changes are ported from other branches; they will be noted inline. The sign convention this enforces is sketched below.
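An illustrative snippet (not from the diff) showing the convention the fix implements for Index ops; per the test comments above, Series does not yet apply the adjustment:

```python
import pandas as pd

idx = pd.Index([-1.0, 1.0])

# pandas fills division by zero with signed infinities; with this fix
# the sign of a negative-zero divisor is respected:
print(idx / 0.0)   # Float64Index([-inf, inf], dtype='float64')
print(idx / -0.0)  # Float64Index([inf, -inf], dtype='float64')
```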
https://api.github.com/repos/pandas-dev/pandas/pulls/27278
2019-07-07T16:19:02Z
2019-07-08T12:43:11Z
2019-07-08T12:43:11Z
2019-07-08T13:46:13Z
TST/CLN: remove try block from tests/test_strings.py::TestStringMetho…
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 6833757c69eaa..f294c1134363c 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2799,23 +2799,20 @@ def test_pipe_failures(self): tm.assert_series_equal(result, exp) - def test_slice(self): + @pytest.mark.parametrize( + "start, stop, step, expected", + [ + (2, 5, None, Series(["foo", "bar", NA, "baz"])), + (0, 3, -1, Series(["", "", NA, ""])), + (None, None, -1, Series(["owtoofaa", "owtrabaa", NA, "xuqzabaa"])), + (3, 10, 2, Series(["oto", "ato", NA, "aqx"])), + (3, 0, -1, Series(["ofa", "aba", NA, "aba"])), + ], + ) + def test_slice(self, start, stop, step, expected): values = Series(["aafootwo", "aabartwo", NA, "aabazqux"]) - - result = values.str.slice(2, 5) - exp = Series(["foo", "bar", NA, "baz"]) - tm.assert_series_equal(result, exp) - - for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2), (3, 0, -1)]: - try: - result = values.str.slice(start, stop, step) - expected = Series( - [s[start:stop:step] if not isna(s) else NA for s in values] - ) - tm.assert_series_equal(result, expected) - except IndexError: - print("failed on %s:%s:%s" % (start, stop, step)) - raise + result = values.str.slice(start, stop, step) + tm.assert_series_equal(result, expected) # mixed mixed = Series(
…ds::test_slice. The try/except with its debugging `print` looks like it was added for troubleshooting; it is not necessary with parametrization, as sketched below.
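A stripped-down illustration of the pattern (a toy example, not the real test):

```python
import pytest

@pytest.mark.parametrize(
    "start, stop, step, expected",
    [(2, 5, None, "foo"), (0, 3, -1, "")],
)
def test_slice(start, stop, step, expected):
    # pytest names the failing parameter set in its report, so the old
    # print-and-reraise bookkeeping inside the test body is redundant
    assert "aafootwo"[start:stop:step] == expected
```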
https://api.github.com/repos/pandas-dev/pandas/pulls/27277
2019-07-07T15:48:32Z
2019-07-08T01:13:33Z
2019-07-08T01:13:33Z
2019-07-08T06:47:54Z
TST/CLN: remove try block from test_indexing_over_size_cutoff_period_index
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 61a9909926efe..e2f40c6267493 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -604,28 +604,24 @@ def test_indexing_over_size_cutoff(): _index._SIZE_CUTOFF = old_cutoff -def test_indexing_over_size_cutoff_period_index(): +def test_indexing_over_size_cutoff_period_index(monkeypatch): # GH 27136 - old_cutoff = _index._SIZE_CUTOFF - try: - _index._SIZE_CUTOFF = 1000 + monkeypatch.setattr(_index, "_SIZE_CUTOFF", 1000) - n = 1100 - idx = pd.period_range("1/1/2000", freq="T", periods=n) - assert idx._engine.over_size_threshold + n = 1100 + idx = pd.period_range("1/1/2000", freq="T", periods=n) + assert idx._engine.over_size_threshold - s = pd.Series(np.random.randn(len(idx)), index=idx) + s = pd.Series(np.random.randn(len(idx)), index=idx) - pos = n - 1 - timestamp = idx[pos] - assert timestamp in s.index + pos = n - 1 + timestamp = idx[pos] + assert timestamp in s.index - # it works! - s[timestamp] - assert len(s.loc[[timestamp]]) > 0 - finally: - _index._SIZE_CUTOFF = old_cutoff + # it works! + s[timestamp] + assert len(s.loc[[timestamp]]) > 0 def test_indexing_unordered():
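The point of the change, as a minimal sketch (assuming `pandas._libs.index` keeps exposing `_SIZE_CUTOFF`, as the test above relies on):

```python
import pytest

import pandas._libs.index as _index

def test_with_small_cutoff(monkeypatch):
    # monkeypatch restores the attribute at teardown even when the test
    # raises, replacing the manual save / try / finally bookkeeping
    monkeypatch.setattr(_index, "_SIZE_CUTOFF", 1000)
    assert _index._SIZE_CUTOFF == 1000
```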
https://api.github.com/repos/pandas-dev/pandas/pulls/27276
2019-07-07T14:07:16Z
2019-07-07T15:44:50Z
2019-07-07T15:44:49Z
2019-07-08T06:46:20Z
ENH: Raise ValueError for unsupported Window functions
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 042c97a0c98b1..7b8a2a853d1a5 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1136,6 +1136,7 @@ Groupby/resample/rolling - Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where incorrect results are returned with ``closed='left'`` and ``closed='neither'`` (:issue:`26005`) - Improved :class:`pandas.core.window.Rolling`, :class:`pandas.core.window.Window` and :class:`pandas.core.window.EWM` functions to exclude nuisance columns from results instead of raising errors and raise a ``DataError`` only if all columns are nuisance (:issue:`12537`) - Bug in :meth:`pandas.core.window.Rolling.max` and :meth:`pandas.core.window.Rolling.min` where incorrect results are returned with an empty variable window (:issue:`26005`) +- Raise a helpful exception when an unsupported weighted window function is used as an argument of :meth:`pandas.core.window.Window.aggregate` (:issue:`26597`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index 15baf1bed0ecd..9480e2e425f79 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -314,9 +314,16 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs): f = getattr(np, arg, None) if f is not None: - return f(self, *args, **kwargs) + try: + return f(self, *args, **kwargs) + + except (AttributeError, TypeError): + pass - raise ValueError("{arg} is an unknown string function".format(arg=arg)) + raise AttributeError( + "'{arg}' is not a valid function for " + "'{cls}' object".format(arg=arg, cls=type(self).__name__) + ) def _aggregate(self, arg, *args, **kwargs): """ diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index 2f3b83e172795..3945a8aaa8b87 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -439,6 +439,22 @@ def test_numpy_compat(self, method): with pytest.raises(UnsupportedFunctionCall, match=msg): getattr(w, method)(dtype=np.float64) + @td.skip_if_no_scipy + @pytest.mark.parametrize("arg", ["median", "var", "std", "kurt", "skew"]) + def test_agg_function_support(self, arg): + df = pd.DataFrame({"A": np.arange(5)}) + roll = df.rolling(2, win_type="triang") + + msg = "'{arg}' is not a valid function for " "'Window' object".format(arg=arg) + with pytest.raises(AttributeError, match=msg): + roll.agg(arg) + + with pytest.raises(AttributeError, match=msg): + roll.agg([arg]) + + with pytest.raises(AttributeError, match=msg): + roll.agg({"A": arg}) + class TestRolling(Base): def setup_method(self, method):
- [x] ref #26597 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Weighted std could be implemented, but there are also other functions that the weighted `Window` is lacking. The user-facing effect is sketched below.
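What the new behavior looks like from the user side (a sketch assuming scipy is installed; the error message follows the test above):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": np.arange(5)})
roll = df.rolling(2, win_type="triang")  # weighted window, needs scipy

roll.agg("mean")    # weighted mean is implemented and works
roll.agg("median")  # now raises AttributeError instead of TypeError:
# "'median' is not a valid function for 'Window' object"
```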
https://api.github.com/repos/pandas-dev/pandas/pulls/27275
2019-07-07T13:33:28Z
2019-07-11T16:44:38Z
2019-07-11T16:44:38Z
2019-07-11T18:06:54Z
Added 32-bit build
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index c5676e0a2a6a0..39f862290e720 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -33,6 +33,12 @@ jobs: PATTERN: "not slow and not network" LOCALE_OVERRIDE: "it_IT.UTF-8" + py36_32bit: + ENV_FILE: ci/deps/azure-36-32bit.yaml + CONDA_PY: "36" + PATTERN: "not slow and not network" + BITS32: "yes" + py37_locale: ENV_FILE: ci/deps/azure-37-locale.yaml CONDA_PY: "37" diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml new file mode 100644 index 0000000000000..43bf0ecdd6c3e --- /dev/null +++ b/ci/deps/azure-36-32bit.yaml @@ -0,0 +1,20 @@ +name: pandas-dev +channels: + - defaults + - conda-forge +dependencies: + - gcc_linux-32 + - gcc_linux-32 + - gxx_linux-32 + - cython=0.28.2 + - numpy=1.14.* + - python-dateutil + - python=3.6.* + - pytz=2017.2 + # universal + - pytest>=4.0.2,<5.0.0 + - pytest-xdist + - pytest-mock + - pytest-azurepipelines + - hypothesis>=3.58.0 + - pip diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 8f73bb228e2bd..88742e0483c7e 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -94,6 +94,12 @@ echo echo "conda env create -q --file=${ENV_FILE}" time conda env create -q --file="${ENV_FILE}" + +if [[ "$BITS32" == "yes" ]]; then + # activate 32-bit compiler + export CONDA_BUILD=1 +fi + echo "activate pandas-dev" source activate pandas-dev diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 72099f2fa3f11..403f5f11ee768 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -616,7 +616,7 @@ def test_groupby_empty(self): # check group properties assert len(gr.grouper.groupings) == 1 tm.assert_numpy_array_equal( - gr.grouper.group_info[0], np.array([], dtype=np.dtype("intp")) + gr.grouper.group_info[0], np.array([], dtype=np.dtype("int64")) ) tm.assert_numpy_array_equal(
Closes https://github.com/pandas-dev/pandas/issues/19694 Closes https://github.com/pandas-dev/pandas/issues/27227
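The groupby test tweak above exists because `np.intp` is platform-dependent; a quick illustration (not part of the CI change itself):

```python
import numpy as np

# np.intp tracks the pointer size, so on the new 32-bit build it is
# int32; the test now checks against the platform-independent int64
print(np.dtype(np.intp))   # int32 on 32-bit builds, int64 on 64-bit
print(np.dtype("int64"))   # always int64
```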
https://api.github.com/repos/pandas-dev/pandas/pulls/27274
2019-07-07T12:42:45Z
2019-07-15T20:11:14Z
2019-07-15T20:11:14Z
2019-07-15T20:11:24Z
TST: parametrize sparse array arithmetic tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index 29833ab2fc0fa..ef2758d263e1a 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1,5 +1,6 @@ from datetime import date, time, timedelta, timezone from decimal import Decimal +import operator import os from dateutil.tz import tzlocal, tzutc @@ -13,6 +14,7 @@ import pandas as pd from pandas import DataFrame +from pandas.core import ops import pandas.util.testing as tm hypothesis.settings.register_profile( @@ -163,6 +165,34 @@ def all_arithmetic_operators(request): return request.param +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + ] +) +def all_arithmetic_functions(request): + """ + Fixture for operator and roperator arithmetic functions. + + Note: This includes divmod and rdivmod, whereas all_arithmetic_operators + does not. + """ + return request.param + + _all_numeric_reductions = [ "sum", "max", diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 7bfedff217719..0f8f3d261c3b3 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -4,10 +4,23 @@ import pytest import pandas as pd +from pandas.core import ops from pandas.core.sparse.api import SparseDtype import pandas.util.testing as tm +@pytest.fixture(params=["integer", "block"]) +def kind(request): + """kind kwarg to pass to SparseArray/SparseSeries""" + return request.param + + +@pytest.fixture(params=[True, False]) +def mix(request): + # whether to operate op(sparse, dense) instead of op(sparse, sparse) + return request.param + + @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning") @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning") class TestSparseArrayArithmetics: @@ -18,60 +31,25 @@ class TestSparseArrayArithmetics: def _assert(self, a, b): tm.assert_numpy_array_equal(a, b) - def _check_numeric_ops(self, a, b, a_dense, b_dense): + def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op): with np.errstate(invalid="ignore", divide="ignore"): - # Unfortunately, trying to wrap the computation of each expected - # value is with np.errstate() is too tedious. 
- - # sparse & sparse - self._assert((a + b).to_dense(), a_dense + b_dense) - self._assert((b + a).to_dense(), b_dense + a_dense) - - self._assert((a - b).to_dense(), a_dense - b_dense) - self._assert((b - a).to_dense(), b_dense - a_dense) - - self._assert((a * b).to_dense(), a_dense * b_dense) - self._assert((b * a).to_dense(), b_dense * a_dense) - - # pandas uses future division - self._assert((a / b).to_dense(), a_dense * 1.0 / b_dense) - self._assert((b / a).to_dense(), b_dense * 1.0 / a_dense) - - # ToDo: FIXME in GH 13843 - if not (self._base == pd.Series and a.dtype.subtype == np.dtype("int64")): - self._assert((a // b).to_dense(), a_dense // b_dense) - self._assert((b // a).to_dense(), b_dense // a_dense) - - self._assert((a % b).to_dense(), a_dense % b_dense) - self._assert((b % a).to_dense(), b_dense % a_dense) - - self._assert((a ** b).to_dense(), a_dense ** b_dense) - self._assert((b ** a).to_dense(), b_dense ** a_dense) - - # sparse & dense - self._assert((a + b_dense).to_dense(), a_dense + b_dense) - self._assert((b_dense + a).to_dense(), b_dense + a_dense) - - self._assert((a - b_dense).to_dense(), a_dense - b_dense) - self._assert((b_dense - a).to_dense(), b_dense - a_dense) - - self._assert((a * b_dense).to_dense(), a_dense * b_dense) - self._assert((b_dense * a).to_dense(), b_dense * a_dense) + if op in [operator.floordiv, ops.rfloordiv]: + # FIXME: GH#13843 + if self._base == pd.Series and a.dtype.subtype == np.dtype("int64"): + pytest.xfail("Not defined/working. See GH#13843") - # pandas uses future division - self._assert((a / b_dense).to_dense(), a_dense * 1.0 / b_dense) - self._assert((b_dense / a).to_dense(), b_dense * 1.0 / a_dense) + if mix: + result = op(a, b_dense).to_dense() + else: + result = op(a, b).to_dense() - # ToDo: FIXME in GH 13843 - if not (self._base == pd.Series and a.dtype.subtype == np.dtype("int64")): - self._assert((a // b_dense).to_dense(), a_dense // b_dense) - self._assert((b_dense // a).to_dense(), b_dense // a_dense) + if op in [operator.truediv, ops.rtruediv]: + # pandas uses future division + expected = op(a_dense * 1.0, b_dense) + else: + expected = op(a_dense, b_dense) - self._assert((a % b_dense).to_dense(), a_dense % b_dense) - self._assert((b_dense % a).to_dense(), b_dense % a_dense) - - self._assert((a ** b_dense).to_dense(), a_dense ** b_dense) - self._assert((b_dense ** a).to_dense(), b_dense ** a_dense) + self._assert(result, expected) def _check_bool_result(self, res): assert isinstance(res, self._klass) @@ -136,289 +114,275 @@ def _check_logical_ops(self, a, b, a_dense, b_dense): self._check_bool_result(a | b_dense) self._assert((a | b_dense).to_dense(), a_dense | b_dense) - def test_float_scalar(self): + @pytest.mark.parametrize("scalar", [0, 1, 3]) + @pytest.mark.parametrize("fill_value", [None, 0, 2]) + def test_float_scalar( + self, kind, mix, all_arithmetic_functions, fill_value, scalar + ): + op = all_arithmetic_functions values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - for kind in ["integer", "block"]: - a = self._klass(values, kind=kind) - self._check_numeric_ops(a, 1, values, 1) - self._check_numeric_ops(a, 0, values, 0) - self._check_numeric_ops(a, 3, values, 3) - - a = self._klass(values, kind=kind, fill_value=0) - self._check_numeric_ops(a, 1, values, 1) - self._check_numeric_ops(a, 0, values, 0) - self._check_numeric_ops(a, 3, values, 3) - - a = self._klass(values, kind=kind, fill_value=2) - self._check_numeric_ops(a, 1, values, 1) - self._check_numeric_ops(a, 0, values, 0) - 
self._check_numeric_ops(a, 3, values, 3) + a = self._klass(values, kind=kind, fill_value=fill_value) + self._check_numeric_ops(a, scalar, values, scalar, mix, op) - def test_float_scalar_comparison(self): + def test_float_scalar_comparison(self, kind): values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - for kind in ["integer", "block"]: - a = self._klass(values, kind=kind) - self._check_comparison_ops(a, 1, values, 1) - self._check_comparison_ops(a, 0, values, 0) - self._check_comparison_ops(a, 3, values, 3) + a = self._klass(values, kind=kind) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) - a = self._klass(values, kind=kind, fill_value=0) - self._check_comparison_ops(a, 1, values, 1) - self._check_comparison_ops(a, 0, values, 0) - self._check_comparison_ops(a, 3, values, 3) + a = self._klass(values, kind=kind, fill_value=0) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) - a = self._klass(values, kind=kind, fill_value=2) - self._check_comparison_ops(a, 1, values, 1) - self._check_comparison_ops(a, 0, values, 0) - self._check_comparison_ops(a, 3, values, 3) + a = self._klass(values, kind=kind, fill_value=2) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) - def test_float_same_index(self): + def test_float_same_index(self, kind, mix, all_arithmetic_functions): # when sp_index are the same - for kind in ["integer", "block"]: - values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + op = all_arithmetic_functions + values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) - a = self._klass(values, kind=kind) - b = self._klass(rvalues, kind=kind) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind) + b = self._klass(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) - rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind, fill_value=0) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - def test_float_same_index_comparison(self): + def test_float_same_index_comparison(self, kind): # when sp_index are the same - for kind in ["integer", "block"]: - values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) - a = self._klass(values, kind=kind) - b = self._klass(rvalues, kind=kind) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind) + b = self._klass(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) - values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 
0.0, 1.0, 2.0, 1.0, 0.0]) - rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind, fill_value=0) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + def test_float_array(self, kind, mix, all_arithmetic_functions): + op = all_arithmetic_functions - def test_float_array(self): values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) - for kind in ["integer", "block"]: - a = self._klass(values, kind=kind) - b = self._klass(rvalues, kind=kind) - self._check_numeric_ops(a, b, values, rvalues) - self._check_numeric_ops(a, b * 0, values, rvalues * 0) + a = self._klass(values, kind=kind) + b = self._klass(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind, fill_value=0) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=1) + b = self._klass(rvalues, kind=kind, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - a = self._klass(values, kind=kind, fill_value=1) - b = self._klass(rvalues, kind=kind, fill_value=2) - self._check_numeric_ops(a, b, values, rvalues) + def test_float_array_different_kind(self, mix, all_arithmetic_functions): + op = all_arithmetic_functions - def test_float_array_different_kind(self): values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) a = self._klass(values, kind="integer") b = self._klass(rvalues, kind="block") - self._check_numeric_ops(a, b, values, rvalues) - self._check_numeric_ops(a, b * 0, values, rvalues * 0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) a = self._klass(values, kind="integer", fill_value=0) b = self._klass(rvalues, kind="block") - self._check_numeric_ops(a, b, values, rvalues) + self._check_numeric_ops(a, b, values, rvalues, mix, op) a = self._klass(values, kind="integer", fill_value=0) b = self._klass(rvalues, kind="block", fill_value=0) - self._check_numeric_ops(a, b, values, rvalues) + self._check_numeric_ops(a, b, values, rvalues, mix, op) a = self._klass(values, kind="integer", fill_value=1) b = self._klass(rvalues, kind="block", fill_value=2) - self._check_numeric_ops(a, b, values, rvalues) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - def test_float_array_comparison(self): + def test_float_array_comparison(self, kind): values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) rvalues = 
self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) - for kind in ["integer", "block"]: - a = self._klass(values, kind=kind) - b = self._klass(rvalues, kind=kind) - self._check_comparison_ops(a, b, values, rvalues) - self._check_comparison_ops(a, b * 0, values, rvalues * 0) + a = self._klass(values, kind=kind) + b = self._klass(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind, fill_value=0) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) - a = self._klass(values, kind=kind, fill_value=1) - b = self._klass(rvalues, kind=kind, fill_value=2) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=1) + b = self._klass(rvalues, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_int_array(self, kind, mix, all_arithmetic_functions): + op = all_arithmetic_functions - def test_int_array(self): # have to specify dtype explicitly until fixing GH 667 dtype = np.int64 values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) - for kind in ["integer", "block"]: - a = self._klass(values, dtype=dtype, kind=kind) - assert a.dtype == SparseDtype(dtype) - b = self._klass(rvalues, dtype=dtype, kind=kind) - assert b.dtype == SparseDtype(dtype) - - self._check_numeric_ops(a, b, values, rvalues) - self._check_numeric_ops(a, b * 0, values, rvalues * 0) + a = self._klass(values, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = self._klass(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) - a = self._klass(values, fill_value=0, dtype=dtype, kind=kind) - assert a.dtype == SparseDtype(dtype) - b = self._klass(rvalues, dtype=dtype, kind=kind) - assert b.dtype == SparseDtype(dtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = self._klass(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) - a = self._klass(values, fill_value=0, dtype=dtype, kind=kind) - assert a.dtype == SparseDtype(dtype) - b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind) - assert b.dtype == SparseDtype(dtype) - self._check_numeric_ops(a, b, values, rvalues) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - a = self._klass(values, fill_value=1, dtype=dtype, kind=kind) - assert a.dtype == SparseDtype(dtype, fill_value=1) - b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind) - assert b.dtype == SparseDtype(dtype, fill_value=2) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind) + assert b.dtype == 
SparseDtype(dtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - def test_int_array_comparison(self): + a = self._klass(values, fill_value=1, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype, fill_value=1) + b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + def test_int_array_comparison(self, kind): + dtype = "int64" # int32 NI ATM - for dtype in ["int64"]: - values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) - rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) - for kind in ["integer", "block"]: - a = self._klass(values, dtype=dtype, kind=kind) - b = self._klass(rvalues, dtype=dtype, kind=kind) - self._check_comparison_ops(a, b, values, rvalues) - self._check_comparison_ops(a, b * 0, values, rvalues * 0) + values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = self._klass(values, dtype=dtype, kind=kind) + b = self._klass(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) - a = self._klass(values, dtype=dtype, kind=kind, fill_value=0) - b = self._klass(rvalues, dtype=dtype, kind=kind) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, dtype=dtype, kind=kind, fill_value=0) + b = self._klass(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) - a = self._klass(values, dtype=dtype, kind=kind, fill_value=0) - b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, dtype=dtype, kind=kind, fill_value=0) + b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) - a = self._klass(values, dtype=dtype, kind=kind, fill_value=1) - b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, dtype=dtype, kind=kind, fill_value=1) + b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) - def test_bool_same_index(self): + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_same_index(self, kind, fill_value): # GH 14000 # when sp_index are the same - for kind in ["integer", "block"]: - values = self._base([True, False, True, True], dtype=np.bool) - rvalues = self._base([True, False, True, True], dtype=np.bool) - - for fill_value in [True, False, np.nan]: - a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value) - b = self._klass( - rvalues, kind=kind, dtype=np.bool, fill_value=fill_value - ) - self._check_logical_ops(a, b, values, rvalues) - - def test_bool_array_logical(self): + values = self._base([True, False, True, True], dtype=np.bool) + rvalues = self._base([True, False, True, True], dtype=np.bool) + + a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value) + b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_array_logical(self, kind, fill_value): # GH 14000 # when sp_index are the same - for kind in ["integer", "block"]: - values = self._base([True, False, True, False, True, True], dtype=np.bool) 
- rvalues = self._base([True, False, False, True, False, True], dtype=np.bool) + values = self._base([True, False, True, False, True, True], dtype=np.bool) + rvalues = self._base([True, False, False, True, False, True], dtype=np.bool) - for fill_value in [True, False, np.nan]: - a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value) - b = self._klass( - rvalues, kind=kind, dtype=np.bool, fill_value=fill_value - ) - self._check_logical_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value) + b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) - def test_mixed_array_float_int(self): + def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions): + op = all_arithmetic_functions - for rdtype in ["int64"]: - values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + rdtype = "int64" - for kind in ["integer", "block"]: - a = self._klass(values, kind=kind) - b = self._klass(rvalues, kind=kind) - assert b.dtype == SparseDtype(rdtype) + values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) - self._check_numeric_ops(a, b, values, rvalues) - self._check_numeric_ops(a, b * 0, values, rvalues * 0) + a = self._klass(values, kind=kind) + b = self._klass(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind) - assert b.dtype == SparseDtype(rdtype) - self._check_numeric_ops(a, b, values, rvalues) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind, fill_value=0) - assert b.dtype == SparseDtype(rdtype) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - a = self._klass(values, kind=kind, fill_value=1) - b = self._klass(rvalues, kind=kind, fill_value=2) - assert b.dtype == SparseDtype(rdtype, fill_value=2) - self._check_numeric_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) - def test_mixed_array_comparison(self): + a = self._klass(values, kind=kind, fill_value=1) + b = self._klass(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + def test_mixed_array_comparison(self, kind): + rdtype = "int64" # int32 NI ATM - for rdtype in ["int64"]: - values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) - for kind in ["integer", "block"]: - a = self._klass(values, kind=kind) - b = self._klass(rvalues, kind=kind) - assert b.dtype == SparseDtype(rdtype) + values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = self._klass(values, kind=kind) + b = self._klass(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) - self._check_comparison_ops(a, 
b, values, rvalues) - self._check_comparison_ops(a, b * 0, values, rvalues * 0) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind) - assert b.dtype == SparseDtype(rdtype) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) - a = self._klass(values, kind=kind, fill_value=0) - b = self._klass(rvalues, kind=kind, fill_value=0) - assert b.dtype == SparseDtype(rdtype) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=0) + b = self._klass(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) - a = self._klass(values, kind=kind, fill_value=1) - b = self._klass(rvalues, kind=kind, fill_value=2) - assert b.dtype == SparseDtype(rdtype, fill_value=2) - self._check_comparison_ops(a, b, values, rvalues) + a = self._klass(values, kind=kind, fill_value=1) + b = self._klass(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) class TestSparseSeriesArithmetic(TestSparseArrayArithmetics): @@ -429,7 +393,9 @@ class TestSparseSeriesArithmetic(TestSparseArrayArithmetics): def _assert(self, a, b): tm.assert_series_equal(a, b) - def test_alignment(self): + def test_alignment(self, mix, all_arithmetic_functions): + op = all_arithmetic_functions + da = pd.Series(np.arange(4)) db = pd.Series(np.arange(4), index=[1, 2, 3, 4]) @@ -437,13 +403,13 @@ def test_alignment(self): sb = pd.SparseSeries( np.arange(4), index=[1, 2, 3, 4], dtype=np.int64, fill_value=0 ) - self._check_numeric_ops(sa, sb, da, db) + self._check_numeric_ops(sa, sb, da, db, mix, op) sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan) sb = pd.SparseSeries( np.arange(4), index=[1, 2, 3, 4], dtype=np.int64, fill_value=np.nan ) - self._check_numeric_ops(sa, sb, da, db) + self._check_numeric_ops(sa, sb, da, db, mix, op) da = pd.Series(np.arange(4)) db = pd.Series(np.arange(4), index=[10, 11, 12, 13]) @@ -452,13 +418,13 @@ def test_alignment(self): sb = pd.SparseSeries( np.arange(4), index=[10, 11, 12, 13], dtype=np.int64, fill_value=0 ) - self._check_numeric_ops(sa, sb, da, db) + self._check_numeric_ops(sa, sb, da, db, mix, op) sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan) sb = pd.SparseSeries( np.arange(4), index=[10, 11, 12, 13], dtype=np.int64, fill_value=np.nan ) - self._check_numeric_ops(sa, sb, da, db) + self._check_numeric_ops(sa, sb, da, db, mix, op) @pytest.mark.parametrize("op", [operator.eq, operator.add])
Motivated by a tedious troubleshooting process in a different branch. I'm pretty sure we can take this even further; saving that for another pass since the diff here is already pretty big.
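The shape of the refactor, boiled down to a toy example (the fixture name matches the diff; the test body is invented for illustration):

```python
import operator
import pytest

@pytest.fixture(params=["integer", "block"])
def kind(request):
    """kind kwarg to pass to SparseArray/SparseSeries"""
    return request.param

@pytest.mark.parametrize("op", [operator.add, operator.mul])
def test_commutes(kind, op):
    # each (kind, op) pair becomes its own test case, so a failure
    # names the exact combination instead of dying inside a for-loop
    assert op(2, 3) == op(3, 2)
```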
https://api.github.com/repos/pandas-dev/pandas/pulls/27271
2019-07-07T04:23:09Z
2019-07-09T21:54:26Z
2019-07-09T21:54:25Z
2019-07-10T00:03:41Z
CLN: Remove unused SparseArray code
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 2332da46574c5..65976021f5053 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -1774,7 +1774,6 @@ def sparse_arithmetic_method(self, other): else: other = np.asarray(other) with np.errstate(all="ignore"): - # TODO: delete sparse stuff in core/ops.py # TODO: look into _wrap_result if len(self) != len(other): raise AssertionError( diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index df2907bf591dd..545f98a02439a 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -1401,12 +1401,6 @@ def _get_method_wrappers(cls): arith_special = _arith_method_SERIES comp_special = _comp_method_SERIES bool_special = _bool_method_SERIES - elif issubclass(cls, ABCSparseArray): - arith_flex = None - comp_flex = None - arith_special = _arith_method_SPARSE_ARRAY - comp_special = _arith_method_SPARSE_ARRAY - bool_special = _arith_method_SPARSE_ARRAY elif issubclass(cls, ABCDataFrame): # Same for DataFrame and SparseDataFrame arith_flex = _arith_method_FRAME @@ -2336,47 +2330,6 @@ def _sparse_series_op(left, right, op, name): return left._constructor(result, index=new_index, name=new_name) -def _arith_method_SPARSE_ARRAY(cls, op, special): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. - """ - op_name = _get_op_name(op, special) - - def wrapper(self, other): - from pandas.core.arrays.sparse.array import ( - SparseArray, - _sparse_array_op, - _wrap_result, - _get_fill, - ) - - if isinstance(other, np.ndarray): - if len(self) != len(other): - raise AssertionError( - "length mismatch: {self} vs. {other}".format( - self=len(self), other=len(other) - ) - ) - if not isinstance(other, SparseArray): - dtype = getattr(other, "dtype", None) - other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) - return _sparse_array_op(self, other, op, op_name) - elif is_scalar(other): - with np.errstate(all="ignore"): - fill = op(_get_fill(self), np.asarray(other)) - result = op(self.sp_values, other) - - return _wrap_result(op_name, result, self.sp_index, fill) - else: # pragma: no cover - raise TypeError( - "operation with {other} not supported".format(other=type(other)) - ) - - wrapper.__name__ = op_name - return wrapper - - def maybe_dispatch_ufunc_to_dunder_op( self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any ):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
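A quick sanity check that sparse arithmetic still goes through the array-level path after deleting the unused wrapper (illustrative only):

```python
import pandas as pd

arr = pd.SparseArray([0.0, 1.0, 2.0])

# these dispatch to SparseArray's own sparse_arithmetic_method; the
# removed _arith_method_SPARSE_ARRAY in core/ops was dead code
print(arr + 1)
print(arr * arr)
```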
https://api.github.com/repos/pandas-dev/pandas/pulls/27269
2019-07-07T02:50:30Z
2019-07-07T14:13:40Z
2019-07-07T14:13:40Z
2019-07-07T14:55:43Z
REF: Avoid dispatching Series ops to pd.Index
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index df2907bf591dd..5738dd9e4a128 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -43,6 +43,7 @@ ABCSeries, ABCSparseArray, ABCSparseSeries, + ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna, notna @@ -1709,10 +1710,30 @@ def wrapper(left, right): # Note: we cannot use dispatch_to_index_op because # that may incorrectly raise TypeError when we # should get NullFrequencyError - result = op(pd.Index(left), right) - return construct_result( - left, result, index=left.index, name=res_name, dtype=result.dtype - ) + orig_right = right + if is_scalar(right): + # broadcast and wrap in a TimedeltaIndex + assert np.isnat(right) + right = np.broadcast_to(right, left.shape) + right = pd.TimedeltaIndex(right) + + assert isinstance(right, (pd.TimedeltaIndex, ABCTimedeltaArray, ABCSeries)) + try: + result = op(left._values, right) + except NullFrequencyError: + if orig_right is not right: + # i.e. scalar timedelta64('NaT') + # We get a NullFrequencyError because we broadcast to + # TimedeltaIndex, but this should be TypeError. + raise TypeError( + "incompatible type for a datetime/timedelta " + "operation [{name}]".format(name=op.__name__) + ) + raise + + # We do not pass dtype to ensure that the Series constructor + # does inference in the case where `result` has object-dtype. + return construct_result(left, result, index=left.index, name=res_name) lvalues = left.values rvalues = right
We have some semi-circular dispatch logic between non-EA Series and pd.Index ops. This avoids that circularity by implementing the relevant logic directly in the Series implementation. After this I'm going to start migrating the Index implementations to use the Series implementation, after which we will have One True Implementation (we will also have to migrate the RangeIndex and IntegerArray implementations).
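Roughly the shape of the new path, sketched (simplified; the NaT broadcasting and error translation in the diff are omitted):

```python
import pandas as pd

left = pd.Series(pd.timedelta_range("1 day", periods=3))
right = pd.TimedeltaIndex(["1 days", "2 days", "3 days"])

# operate on the underlying values and rebuild the Series, instead of
# round-tripping through pd.Index(left) as the old code did
result = left._values + right
print(pd.Series(result, index=left.index, name=left.name))
```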
https://api.github.com/repos/pandas-dev/pandas/pulls/27268
2019-07-06T21:19:13Z
2019-07-08T01:14:26Z
2019-07-08T01:14:26Z
2019-07-08T01:22:34Z
ENH: Add Series method to explode a list-like column
diff --git a/Makefile b/Makefile index a02fe145c5f0e..baceefe6d49ff 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,7 @@ .PHONY : develop build clean clean_pyc doc lint-diff black +all: develop + clean: -python setup.py clean diff --git a/asv_bench/benchmarks/io/parsers.py b/asv_bench/benchmarks/io/parsers.py index 40256e043a008..c5e099bd44eac 100644 --- a/asv_bench/benchmarks/io/parsers.py +++ b/asv_bench/benchmarks/io/parsers.py @@ -10,7 +10,7 @@ pass -class DoesStringLookLikeDatetime(object): +class DoesStringLookLikeDatetime: params = (["2Q2005", "0.0", "10000"],) param_names = ["value"] @@ -23,7 +23,7 @@ def time_check_datetimes(self, value): _does_string_look_like_datetime(obj) -class ConcatDateCols(object): +class ConcatDateCols: params = ([1234567890, "AAAA"], [1, 2]) param_names = ["value", "dim"] diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index f41e13163b3f5..1aed756b841a5 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -240,4 +240,17 @@ def time_qcut_datetime(self, bins): pd.qcut(self.datetime_series, bins) +class Explode: + param_names = ["n_rows", "max_list_length"] + params = [[100, 1000, 10000], [3, 5, 10]] + + def setup(self, n_rows, max_list_length): + + data = [np.arange(np.random.randint(max_list_length)) for _ in range(n_rows)] + self.series = pd.Series(data) + + def time_explode(self, n_rows, max_list_length): + self.series.explode() + + from .pandas_vb_common import setup # noqa: F401 diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index e2835c5156f55..6038a2ab4bd9f 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -219,7 +219,7 @@ def time_series_datetimeindex_repr(self): getattr(self.s, "a", None) -class All(object): +class All: params = [[10 ** 3, 10 ** 6], ["fast", "slow"]] param_names = ["N", "case"] @@ -232,7 +232,7 @@ def time_all(self, N, case): self.s.all() -class Any(object): +class Any: params = [[10 ** 3, 10 ** 6], ["fast", "slow"]] param_names = ["N", "case"] @@ -245,7 +245,7 @@ def time_any(self, N, case): self.s.any() -class NanOps(object): +class NanOps: params = [ [ diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index a74527df25f9b..1020b773f8acb 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -293,7 +293,7 @@ def time_format_YYYYMMDD(self): to_datetime(self.stringsD, format="%Y%m%d") -class ToDatetimeCacheSmallCount(object): +class ToDatetimeCacheSmallCount: params = ([True, False], [50, 500, 5000, 100000]) param_names = ["cache", "count"] diff --git a/ci/code_checks.sh b/ci/code_checks.sh index fec2a88292280..96a8440d85694 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -156,7 +156,7 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for python2 new-style classes and for empty parentheses' ; echo $MSG - invgrep -R --include="*.py" --include="*.pyx" -E "class\s\S*\((object)?\):" pandas scripts + invgrep -R --include="*.py" --include="*.pyx" -E "class\s\S*\((object)?\):" pandas asv_bench/benchmarks scripts RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for backticks incorrectly rendering because of missing spaces' ; echo $MSG diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst index c0b58fd2d99f5..b1c6172fb1261 100644 --- a/doc/source/reference/frame.rst +++ b/doc/source/reference/frame.rst @@ -239,6 +239,7 @@ 
Reshaping, sorting, transposing DataFrame.unstack DataFrame.swapaxes DataFrame.melt + DataFrame.explode DataFrame.squeeze DataFrame.to_xarray DataFrame.T diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index 8d2a764c33a43..7ba625c141f24 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -245,6 +245,7 @@ Reshaping, sorting Series.sort_index Series.swaplevel Series.unstack + Series.explode Series.searchsorted Series.ravel Series.repeat @@ -590,4 +591,3 @@ Sparse SparseSeries.to_coo SparseSeries.from_coo - diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index b7b6dd0a69c24..0470a6c0c2f42 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -801,3 +801,53 @@ Note to subdivide over multiple columns we can pass in a list to the df.pivot_table( values=['val0'], index='row', columns=['item', 'col'], aggfunc=['mean']) + +.. _reshaping.explode: + +Exploding a list-like column +---------------------------- + +.. versionadded:: 0.25.0 + +Sometimes the values in a column are list-like. + +.. ipython:: python + + keys = ['panda1', 'panda2', 'panda3'] + values = [['eats', 'shoots'], ['shoots', 'leaves'], ['eats', 'leaves']] + df = pd.DataFrame({'keys': keys, 'values': values}) + df + +We can 'explode' the ``values`` column, transforming each list-like to a separate row, by using :meth:`~Series.explode`. This will replicate the index values from the original row: + +.. ipython:: python + + df['values'].explode() + +You can also explode the column in the ``DataFrame``. + +.. ipython:: python + + df.explode('values') + +:meth:`Series.explode` will replace empty lists with ``np.nan`` and preserve scalar entries. The dtype of the resulting ``Series`` is always ``object``. + +.. ipython:: python + + s = pd.Series([[1, 2, 3], 'foo', [], ['a', 'b']]) + s + s.explode() + +Here is a typical usecase. You have comma separated strings in a column and want to expand this. + +.. ipython:: python + + df = pd.DataFrame([{'var1': 'a,b,c', 'var2': 1}, + {'var1': 'd,e,f', 'var2': 2}]) + df + +Creating a long form DataFrame is now straightforward using explode and chained operations + +.. ipython:: python + + df.assign(var1=df.var1.str.split(',')).explode('var1') diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 59cd6615b7395..a08159e6c3199 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -182,6 +182,28 @@ The repr now looks like this: json_normalize(data, max_level=1) +.. _whatsnew_0250.enhancements.explode: + +Series.explode to split list-like values to rows +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:class:`Series` and :class:`DataFrame` have gained the :meth:`DataFrame.explode` methods to transform list-likes to individual rows. See :ref:`section on Exploding list-like column <reshaping.explode>` in docs for more information (:issue:`16538`, :issue:`10511`) + + +Here is a typical usecase. You have comma separated string in a column. + +.. ipython:: python + + df = pd.DataFrame([{'var1': 'a,b,c', 'var2': 1}, + {'var1': 'd,e,f', 'var2': 2}]) + df + +Creating a long form ``DataFrame`` is now straightforward using chained operations + +.. ipython:: python + + df.assign(var1=df.var1.str.split(',')).explode('var1') + .. 
_whatsnew_0250.enhancements.other: Other enhancements diff --git a/pandas/_libs/lib.pxd b/pandas/_libs/lib.pxd new file mode 100644 index 0000000000000..12aca9dabe2e7 --- /dev/null +++ b/pandas/_libs/lib.pxd @@ -0,0 +1 @@ +cdef bint c_is_list_like(object, bint) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1936404b75602..27ee685acfde7 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1,3 +1,4 @@ +from collections import abc from decimal import Decimal from fractions import Fraction from numbers import Number @@ -886,6 +887,60 @@ def is_period(val: object) -> bool: return util.is_period_object(val) +def is_list_like(obj: object, allow_sets: bool = True): + """ + Check if the object is list-like. + + Objects that are considered list-like are for example Python + lists, tuples, sets, NumPy arrays, and Pandas Series. + + Strings and datetime objects, however, are not considered list-like. + + Parameters + ---------- + obj : The object to check + allow_sets : boolean, default True + If this parameter is False, sets will not be considered list-like + + .. versionadded:: 0.24.0 + + Returns + ------- + is_list_like : bool + Whether `obj` has list-like properties. + + Examples + -------- + >>> is_list_like([1, 2, 3]) + True + >>> is_list_like({1, 2, 3}) + True + >>> is_list_like(datetime(2017, 1, 1)) + False + >>> is_list_like("foo") + False + >>> is_list_like(1) + False + >>> is_list_like(np.array([2])) + True + >>> is_list_like(np.array(2))) + False + """ + return c_is_list_like(obj, allow_sets) + + +cdef inline bint c_is_list_like(object obj, bint allow_sets): + return ( + isinstance(obj, abc.Iterable) + # we do not count strings/unicode/bytes as list-like + and not isinstance(obj, (str, bytes)) + # exclude zero-dimensional numpy arrays, effectively scalars + and not (util.is_array(obj) and obj.ndim == 0) + # exclude sets if allow_sets is False + and not (allow_sets is False and isinstance(obj, abc.Set)) + ) + + _TYPE_MAP = { 'categorical': 'categorical', 'category': 'categorical', diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 35b2ab4aa5326..f229de002ce5c 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -2,8 +2,11 @@ import cython from cython import Py_ssize_t from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float32_t, float64_t) - + uint32_t, uint64_t, float32_t, float64_t, ndarray) +cimport numpy as cnp +import numpy as np +from pandas._libs.lib cimport c_is_list_like +cnp.import_array() ctypedef fused reshape_t: uint8_t @@ -91,3 +94,59 @@ unstack_int64 = unstack["int64_t"] unstack_float32 = unstack["float32_t"] unstack_float64 = unstack["float64_t"] unstack_object = unstack["object"] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def explode(ndarray[object] values): + """ + transform array list-likes to long form + preserve non-list entries + + Parameters + ---------- + values : object ndarray + + Returns + ------- + tuple(values, counts) + """ + cdef: + Py_ssize_t i, j, count, n + object v + ndarray[object] result + ndarray[int64_t] counts + + # find the resulting len + n = len(values) + counts = np.zeros(n, dtype='int64') + for i in range(n): + v = values[i] + if c_is_list_like(v, False): + if len(v): + counts[i] += len(v) + else: + # empty list-like, use a nan marker + counts[i] += 1 + else: + counts[i] += 1 + + result = np.empty(counts.sum(), dtype='object') + count = 0 + for i in range(n): + v = values[i] + + if c_is_list_like(v, False): + if len(v): 
+ for j in range(len(v)): + result[count] = v[j] + count += 1 + else: + # empty list-like, use a nan marker + result[count] = np.nan + count += 1 + else: + # replace with the existing scalar + result[count] = v + count += 1 + return result, counts diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 9373ea18e8a24..461b5cc6232cd 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -23,6 +23,8 @@ is_interval = lib.is_interval +is_list_like = lib.is_list_like + def is_number(obj): """ @@ -241,60 +243,6 @@ def is_re_compilable(obj): return True -def is_list_like(obj, allow_sets=True): - """ - Check if the object is list-like. - - Objects that are considered list-like are for example Python - lists, tuples, sets, NumPy arrays, and Pandas Series. - - Strings and datetime objects, however, are not considered list-like. - - Parameters - ---------- - obj : The object to check - allow_sets : boolean, default True - If this parameter is False, sets will not be considered list-like - - .. versionadded:: 0.24.0 - - Returns - ------- - is_list_like : bool - Whether `obj` has list-like properties. - - Examples - -------- - >>> is_list_like([1, 2, 3]) - True - >>> is_list_like({1, 2, 3}) - True - >>> is_list_like(datetime(2017, 1, 1)) - False - >>> is_list_like("foo") - False - >>> is_list_like(1) - False - >>> is_list_like(np.array([2])) - True - >>> is_list_like(np.array(2))) - False - """ - - return ( - isinstance(obj, abc.Iterable) - and - # we do not count strings/unicode/bytes as list-like - not isinstance(obj, (str, bytes)) - and - # exclude zero-dimensional numpy arrays, effectively scalars - not (isinstance(obj, np.ndarray) and obj.ndim == 0) - and - # exclude sets if allow_sets is False - not (allow_sets is False and isinstance(obj, abc.Set)) - ) - - def is_array_like(obj): """ Check if the object is array-like. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f45a13249b16c..c15f4ad8e1900 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -15,7 +15,7 @@ import itertools import sys from textwrap import dedent -from typing import FrozenSet, List, Optional, Set, Type, Union +from typing import FrozenSet, List, Optional, Set, Tuple, Type, Union import warnings import numpy as np @@ -6237,6 +6237,75 @@ def stack(self, level=-1, dropna=True): else: return stack(self, level, dropna=dropna) + def explode(self, column: Union[str, Tuple]) -> "DataFrame": + """ + Transform each element of a list-like to a row, replicating the + index values. + + .. versionadded:: 0.25.0 + + Parameters + ---------- + column : str or tuple + + Returns + ------- + DataFrame + Exploded lists to rows of the subset columns; + index will be duplicated for these rows. + + Raises + ------ + ValueError : + if columns of the frame are not unique. + + See Also + -------- + DataFrame.unstack : Pivot a level of the (necessarily hierarchical) + index labels + DataFrame.melt : Unpivot a DataFrame from wide format to long format + Series.explode : Explode a Series from list-likes to long format. + + Notes + ----- + This routine will explode list-likes including lists, tuples, + Series, and np.ndarray. The result dtype of the subset rows will + be object. Scalars will be returned unchanged. Empty list-likes will + result in a np.nan for that row.
+ + Examples + -------- + >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1}) + >>> df + A B + 0 [1, 2, 3] 1 + 1 foo 1 + 2 [] 1 + 3 [3, 4] 1 + + >>> df.explode('A') + A B + 0 1 1 + 0 2 1 + 0 3 1 + 1 foo 1 + 2 NaN 1 + 3 3 1 + 3 4 1 + """ + + if not (is_scalar(column) or isinstance(column, tuple)): + raise ValueError("column must be a scalar") + if not self.columns.is_unique: + raise ValueError("columns must be unique") + + result = self[column].explode() + return ( + self.drop([column], axis=1) + .join(result) + .reindex(columns=self.columns, copy=False) + ) + def unstack(self, level=-1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels, returning @@ -6339,6 +6408,7 @@ def unstack(self, level=-1, fill_value=None): %(other)s pivot_table DataFrame.pivot + Series.explode Examples -------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e084f99ec5a2c..7bbd30e0c28b1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -963,6 +963,7 @@ def _assert_take_fillable( @Appender(_index_shared_docs["repeat"] % _index_doc_kwargs) def repeat(self, repeats, axis=None): + repeats = ensure_platform_int(repeats) nv.validate_repeat(tuple(), dict(axis=axis)) return self._shallow_copy(self._values.repeat(repeats)) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 670a4666a3440..b673c119c0498 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2091,10 +2091,11 @@ def argsort(self, *args, **kwargs): @Appender(_index_shared_docs["repeat"] % _index_doc_kwargs) def repeat(self, repeats, axis=None): nv.validate_repeat(tuple(), dict(axis=axis)) + repeats = ensure_platform_int(repeats) return MultiIndex( levels=self.levels, codes=[ - level_codes.view(np.ndarray).repeat(repeats) + level_codes.view(np.ndarray).astype(np.intp).repeat(repeats) for level_codes in self.codes ], names=self.names, diff --git a/pandas/core/series.py b/pandas/core/series.py index 46b96c1ece77c..8082069efce3c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -12,7 +12,7 @@ from pandas._config import get_option -from pandas._libs import iNaT, index as libindex, lib, tslibs +from pandas._libs import iNaT, index as libindex, lib, reshape, tslibs from pandas.compat import PY36 from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, deprecate @@ -33,6 +33,7 @@ is_integer, is_iterator, is_list_like, + is_object_dtype, is_scalar, is_string_like, is_timedelta64_dtype, @@ -2007,7 +2008,7 @@ def drop_duplicates(self, keep="first", inplace=False): Examples -------- - Generate an Series with duplicated entries. + Generate a Series with duplicated entries. >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') @@ -3635,6 +3636,62 @@ def reorder_levels(self, order): result.index = result.index.reorder_levels(order) return result + def explode(self) -> "Series": + """ + Transform each element of a list-like to a row, replicating the + index values. + + .. versionadded:: 0.25.0 + + Returns + ------- + Series + Exploded lists to rows; index will be duplicated for these rows. + + See Also + -------- + Series.str.split : Split string values on specified separator. + Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex + to produce DataFrame. + DataFrame.melt : Unpivot a DataFrame from wide format to long format + DataFrame.explode : Explode a DataFrame from list-like + columns to long format. 
+ + Notes + ----- + This routine will explode list-likes including lists, tuples, + Series, and np.ndarray. The result dtype of the subset rows will + be object. Scalars will be returned unchanged. Empty list-likes will + result in a np.nan for that row. + + Examples + -------- + >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) + >>> s + 0 [1, 2, 3] + 1 foo + 2 [] + 3 [3, 4] + dtype: object + + >>> s.explode() + 0 1 + 0 2 + 0 3 + 1 foo + 2 NaN + 3 3 + 3 4 + dtype: object + """ + if not len(self) or not is_object_dtype(self): + return self.copy() + + values, counts = reshape.explode(np.asarray(self.array)) + + result = Series(values, index=self.index.repeat(counts), name=self.name) + return result + def unstack(self, level=-1, fill_value=None): """ Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. diff --git a/pandas/tests/frame/test_explode.py b/pandas/tests/frame/test_explode.py new file mode 100644 index 0000000000000..b4330aadbfba3 --- /dev/null +++ b/pandas/tests/frame/test_explode.py @@ -0,0 +1,120 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.util import testing as tm + + +def test_error(): + df = pd.DataFrame( + {"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1} + ) + with pytest.raises(ValueError): + df.explode(list("AA")) + + df.columns = list("AA") + with pytest.raises(ValueError): + df.explode("A") + + +def test_basic(): + df = pd.DataFrame( + {"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1} + ) + result = df.explode("A") + expected = pd.DataFrame( + { + "A": pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object + ), + "B": 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_multi_index_rows(): + df = pd.DataFrame( + {"A": np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), "B": 1}, + index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]), + ) + + result = df.explode("A") + expected = pd.DataFrame( + { + "A": pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], + index=pd.MultiIndex.from_tuples( + [ + ("a", 1), + ("a", 1), + ("a", 1), + ("a", 2), + ("b", 1), + ("b", 2), + ("b", 2), + ] + ), + dtype=object, + ), + "B": 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_multi_index_columns(): + df = pd.DataFrame( + {("A", 1): np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), ("A", 2): 1} + ) + + result = df.explode(("A", 1)) + expected = pd.DataFrame( + { + ("A", 1): pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], + index=pd.Index([0, 0, 0, 1, 2, 3, 3]), + dtype=object, + ), + ("A", 2): 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_usecase(): + # explode a single column + # gh-10511 + df = pd.DataFrame( + [[11, range(5), 10], [22, range(3), 20]], columns=list("ABC") + ).set_index("C") + result = df.explode("B") + + expected = pd.DataFrame( + { + "A": [11, 11, 11, 11, 11, 22, 22, 22], + "B": np.array([0, 1, 2, 3, 4, 0, 1, 2], dtype=object), + "C": [10, 10, 10, 10, 10, 20, 20, 20], + }, + columns=list("ABC"), + ).set_index("C") + + tm.assert_frame_equal(result, expected) + + # gh-8517 + df = pd.DataFrame( + [["2014-01-01", "Alice", "A B"], ["2014-01-02", "Bob", "C D"]], + columns=["dt", "name", "text"], + ) + result = df.assign(text=df.text.str.split(" ")).explode("text") + expected = pd.DataFrame( + [ + ["2014-01-01", "Alice", "A"], + ["2014-01-01", "Alice", "B"], + ["2014-01-02", "Bob", "C"], + ["2014-01-02", "Bob", "D"], + ], + columns=["dt", "name", "text"], + index=[0, 0, 1, 
1], ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_explode.py b/pandas/tests/series/test_explode.py new file mode 100644 index 0000000000000..331546f7dc73d --- /dev/null +++ b/pandas/tests/series/test_explode.py @@ -0,0 +1,113 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.util import testing as tm + + +def test_basic(): + s = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo") + result = s.explode() + expected = pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo" + ) + tm.assert_series_equal(result, expected) + + +def test_mixed_type(): + s = pd.Series( + [[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo" + ) + result = s.explode() + expected = pd.Series( + [0, 1, 2, np.nan, None, np.nan, "a", "b"], + index=[0, 0, 0, 1, 2, 3, 4, 4], + dtype=object, + name="foo", + ) + tm.assert_series_equal(result, expected) + + +def test_empty(): + s = pd.Series() + result = s.explode() + expected = s.copy() + tm.assert_series_equal(result, expected) + + +def test_nested_lists(): + s = pd.Series([[[1, 2, 3]], [1, 2], 1]) + result = s.explode() + expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2]) + tm.assert_series_equal(result, expected) + + +def test_multi_index(): + s = pd.Series( + [[0, 1, 2], np.nan, [], (3, 4)], + name="foo", + index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]), + ) + result = s.explode() + index = pd.MultiIndex.from_tuples( + [("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)], + names=["foo", "bar"], + ) + expected = pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], index=index, dtype=object, name="foo" + ) + tm.assert_series_equal(result, expected) + + +def test_large(): + s = pd.Series([range(256)]).explode() + result = s.explode() + tm.assert_series_equal(result, s) + + +def test_invert_array(): + df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")}) + + listify = df.apply(lambda x: x.array, axis=1) + result = listify.explode() + tm.assert_series_equal(result, df["a"].rename()) + + +@pytest.mark.parametrize( + "s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))] +) +def test_non_object_dtype(s): + result = s.explode() + tm.assert_series_equal(result, s) + + +def test_typical_usecase(): + + df = pd.DataFrame( + [{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}], + columns=["var1", "var2"], + ) + exploded = df.var1.str.split(",").explode() + exploded + result = df[["var2"]].join(exploded) + expected = pd.DataFrame( + {"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")}, + columns=["var2", "var1"], + index=[0, 0, 0, 1, 1, 1], + ) + tm.assert_frame_equal(result, expected) + + +def test_nested_EA(): + # a nested EA array + s = pd.Series( + [ + pd.date_range("20170101", periods=3, tz="UTC"), + pd.date_range("20170104", periods=3, tz="UTC"), + ] + ) + result = s.explode() + expected = pd.Series( + pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1] + ) + tm.assert_series_equal(result, expected)
replaces #24366 closes #16538 closes #10511 Sometimes a values column holds list-like values in a single row. Instead we may want to split each individual value onto its own row, keeping the same mapping to the other key columns. While it's possible to chain together existing pandas operations to do this (in fact, that's exactly what this implementation does), the sequence of operations is not obvious. By contrast, this is available as a built-in operation in, say, Spark, and is a fairly common use case. It also provides a nice inversion here: ``` In [31]: s = pd.DataFrame({'a': pd.date_range('20190101', periods=3, tz='UTC')}).apply(lambda x: x.array, axis=1) In [32]: s Out[32]: 0 [2019-01-01 00:00:00+00:00] 1 [2019-01-02 00:00:00+00:00] 2 [2019-01-03 00:00:00+00:00] dtype: object In [33]: s.iloc[0] Out[33]: <DatetimeArray> ['2019-01-01 00:00:00+00:00'] Length: 1, dtype: datetime64[ns, UTC] In [34]: s.explode() Out[34]: 0 2019-01-01 00:00:00+00:00 1 2019-01-02 00:00:00+00:00 2 2019-01-03 00:00:00+00:00 dtype: datetime64[ns, UTC] ``` ``` In [4]: s = pd.Series([np.arange(np.random.randint(100)) for _ in range(100000)]) In [5]: s Out[5]: 0 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... 1 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... 2 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... 3 [0, 1, 2, 3, 4] 4 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... ... 99995 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... 99996 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... 99997 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... 99998 [0, 1, 2, 3] 99999 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... Length: 100000, dtype: object In [6]: s.explode() Out[6]: 0 0 0 1 0 2 0 3 0 4 .. 99999 43 99999 44 99999 45 99999 46 99999 47 Length: 4950205, dtype: object In [7]: %timeit s.explode() 584 ms ± 17.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) ``` DataFrame exploding ``` In [1]: df = pd.DataFrame( ...: [[11, range(5), 10], [22, range(3), 20]], columns=["A", "B", "C"] ...: ).set_index("C") ...: df Out[1]: A B C 10 11 (0, 1, 2, 3, 4) 20 22 (0, 1, 2) In [2]: df.explode('B') Out[2]: A B C 10 11 0 10 11 1 10 11 2 10 11 3 10 11 4 20 22 0 20 22 1 20 22 2 ```
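For reference, a rough sketch of the manual idiom this replaces for purely list-valued input (illustrative only, not the implementation; `explode` additionally handles scalars, empty list-likes and NaN, and returns object dtype):

```python
import numpy as np
import pandas as pd

s = pd.Series([[1, 2], [3, 4, 5]], index=["a", "b"])
# repeat each index label once per element, then flatten the values
counts = np.array([len(v) for v in s])
manual = pd.Series(np.concatenate(list(s)), index=s.index.repeat(counts))
# matches s.explode() for this input (up to dtype)
```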
https://api.github.com/repos/pandas-dev/pandas/pulls/27267
2019-07-06T19:28:11Z
2019-07-18T10:51:50Z
2019-07-18T10:51:49Z
2019-08-26T20:53:51Z
DOC: Clarify column type for 'on' parameter of rolling
diff --git a/pandas/core/window.py b/pandas/core/window.py index 27588249b1b3c..0c1f6a1a6dace 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -513,8 +513,10 @@ class Window(_Window): Provide a window type. If ``None``, all points are evenly weighted. See the notes below for further information. on : str, optional - For a DataFrame, column on which to calculate - the rolling window, rather than the index. + For a DataFrame, a datetime-like column on which to calculate the rolling + window, rather than the DataFrame's index. A provided integer column is + ignored and excluded from the result, since an integer index is not used + to calculate the rolling window. axis : int or str, default 0 closed : str, default None Make the interval closed on the 'right', 'left', 'both' or
- [x] closes #21687
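A minimal illustration of the documented behavior (not part of the diff):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "t": pd.date_range("2019-01-01", periods=5, freq="D"),
        "x": [1.0, 2.0, 3.0, 4.0, 5.0],
    }
)
# windows are computed over the datetime-like column "t", not the index
df.rolling("2D", on="t").sum()
```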
https://api.github.com/repos/pandas-dev/pandas/pulls/27265
2019-07-06T17:57:45Z
2019-07-08T12:47:08Z
2019-07-08T12:47:08Z
2019-07-11T15:46:56Z
CLN: remove unnecessary fastpath, transpose kwargs in internals
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4e9f74162ae78..afd65b3c009ab 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9175,7 +9175,6 @@ def _where( errors=errors, try_cast=try_cast, axis=block_axis, - transpose=self._AXIS_REVERSED, ) return self._constructor(new_data).__finalize__(self) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 022d855d9a15b..bf6ebf1abe760 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -143,7 +143,7 @@ def _check_ndim(self, values, ndim): ndim = values.ndim if self._validate_ndim and values.ndim != ndim: - msg = "Wrong number of dimensions. values.ndim != ndim " "[{} != {}]" + msg = "Wrong number of dimensions. values.ndim != ndim [{} != {}]" raise ValueError(msg.format(values.ndim, ndim)) return ndim @@ -259,7 +259,7 @@ def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): if dtype is not None: # issue 19431 fastparquet is passing this warnings.warn( - "dtype argument is deprecated, will be removed " "in a future release.", + "dtype argument is deprecated, will be removed in a future release.", FutureWarning, ) if placement is None: @@ -399,7 +399,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): raise ValueError("Limit must be greater than 0") if self.ndim > 2: raise NotImplementedError( - "number of dimensions for 'fillna' " "is currently limited to 2" + "number of dimensions for 'fillna' is currently limited to 2" ) mask[mask.cumsum(self.ndim - 1) > limit] = False @@ -533,7 +533,7 @@ def downcast(self, dtypes=None): if not (dtypes == "infer" or isinstance(dtypes, dict)): raise ValueError( - "downcast must have a dictionary or 'infer' as " "its argument" + "downcast must have a dictionary or 'infer' as its argument" ) # operate column-by-column @@ -1025,7 +1025,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) or mask[mask].shape[-1] == len(new) or len(new) == 1 ): - raise ValueError("cannot assign mismatch " "length to masked array") + raise ValueError("cannot assign mismatch length to masked array") np.putmask(new_values, mask, new) @@ -1381,16 +1381,7 @@ def shift(self, periods, axis=0, fill_value=None): return [self.make_block(new_values)] - def where( - self, - other, - cond, - align=True, - errors="raise", - try_cast=False, - axis=0, - transpose=False, - ): + def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): """ evaluate the block; return result block(s) from the result @@ -1402,10 +1393,7 @@ def where( errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. 
On error return original object - axis : int - transpose : boolean - Set to True if self is stored with axes reversed Returns ------- @@ -1414,6 +1402,7 @@ def where( import pandas.core.computation.expressions as expressions assert errors in ["raise", "ignore"] + transpose = self.ndim == 2 values = self.values orig_other = other @@ -1432,7 +1421,7 @@ def where( cond = cond.T if not hasattr(cond, "shape"): - raise ValueError("where must have a condition that is ndarray " "like") + raise ValueError("where must have a condition that is ndarray like") # our where function def func(cond, values, other): @@ -1473,7 +1462,6 @@ def func(cond, values, other): errors=errors, try_cast=try_cast, axis=axis, - transpose=transpose, ) return self._maybe_downcast(blocks, "infer") @@ -1917,7 +1905,7 @@ def _slice(self, slicer): if isinstance(slicer, tuple) and len(slicer) == 2: if not com.is_null_slice(slicer[0]): - raise AssertionError("invalid slicing for a 1-ndim " "categorical") + raise AssertionError("invalid slicing for a 1-ndim categorical") slicer = slicer[1] return self.values[slicer] @@ -2004,16 +1992,7 @@ def shift( ) ] - def where( - self, - other, - cond, - align=True, - errors="raise", - try_cast=False, - axis=0, - transpose=False, - ): + def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): if isinstance(other, ABCDataFrame): # ExtensionArrays are 1-D, so if we get here then # `other` should be a DataFrame with a single column. @@ -2321,9 +2300,7 @@ def _try_coerce_args(self, other): elif isinstance(other, (datetime, np.datetime64, date)): other = self._box_func(other) if getattr(other, "tz") is not None: - raise TypeError( - "cannot coerce a Timestamp with a tz on a " "naive Block" - ) + raise TypeError("cannot coerce a Timestamp with a tz on a naive Block") other = other.asm8.view("i8") elif hasattr(other, "dtype") and is_datetime64_dtype(other): other = other.astype("i8", copy=False).view("i8") @@ -2997,7 +2974,7 @@ def _replace_single( # only one will survive if to_rep_re and regex_re: raise AssertionError( - "only one of to_replace and regex can be " "regex compilable" + "only one of to_replace and regex can be regex compilable" ) # if regex was passed as something that can be a regex (rather than a @@ -3181,16 +3158,7 @@ def concat_same_type(self, to_concat, placement=None): values, placement=placement or slice(0, len(values), 1), ndim=self.ndim ) - def where( - self, - other, - cond, - align=True, - errors="raise", - try_cast=False, - axis=0, - transpose=False, - ): + def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): # TODO(CategoricalBlock.where): # This can all be deleted in favor of ExtensionBlock.where once # we enforce the deprecation. @@ -3205,19 +3173,11 @@ def where( ) try: # Attempt to do preserve categorical dtype. 
- result = super().where( - other, cond, align, errors, try_cast, axis, transpose - ) + result = super().where(other, cond, align, errors, try_cast, axis) except (TypeError, ValueError): warnings.warn(object_msg, FutureWarning, stacklevel=6) result = self.astype(object).where( - other, - cond, - align=align, - errors=errors, - try_cast=try_cast, - axis=axis, - transpose=transpose, + other, cond, align=align, errors=errors, try_cast=try_cast, axis=axis ) return result @@ -3286,7 +3246,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=No if fastpath is not None: # GH#19265 pyarrow is passing this warnings.warn( - "fastpath argument is deprecated, will be removed " "in a future release.", + "fastpath argument is deprecated, will be removed in a future release.", FutureWarning, ) if klass is None: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index c5254aaa4af5f..b3c74aaaa5701 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -936,7 +936,7 @@ def _consolidate_inplace(self): self._known_consolidated = True self._rebuild_blknos_and_blklocs() - def get(self, item, fastpath=True): + def get(self, item): """ Return values for selected item (ndarray or BlockManager). """ @@ -954,7 +954,7 @@ def get(self, item, fastpath=True): else: raise ValueError("cannot label index with a null key") - return self.iget(loc, fastpath=fastpath) + return self.iget(loc) else: if isna(item): @@ -965,18 +965,18 @@ def get(self, item, fastpath=True): new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True ) - def iget(self, i, fastpath=True): + def iget(self, i): """ - Return the data as a SingleBlockManager if fastpath=True and possible + Return the data as a SingleBlockManager if possible Otherwise return as a ndarray """ block = self.blocks[self._blknos[i]] values = block.iget(self._blklocs[i]) - if not fastpath or values.ndim != 1: + if values.ndim != 1: return values - # fastpath shortcut for select a single-dim from a 2-dim BM + # shortcut for select a single-dim from a 2-dim BM return SingleBlockManager( [ block.make_block_same_class( diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 9ce1062a6ec26..6beb847da3eb4 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -418,9 +418,6 @@ def test_get(self): block = make_block(values=values.copy(), placement=np.arange(3)) mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)]) - assert_almost_equal(mgr.get("a", fastpath=False), values[0]) - assert_almost_equal(mgr.get("b", fastpath=False), values[1]) - assert_almost_equal(mgr.get("c", fastpath=False), values[2]) assert_almost_equal(mgr.get("a").internal_values(), values[0]) assert_almost_equal(mgr.get("b").internal_values(), values[1]) assert_almost_equal(mgr.get("c").internal_values(), values[2]) @@ -701,6 +698,7 @@ def test_consolidate_ordering_issues(self, mgr): ) def test_reindex_index(self): + # TODO: should this be pytest.skip? 
pass def test_reindex_items(self): @@ -710,18 +708,6 @@ def test_reindex_items(self): reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0) assert reindexed.nblocks == 2 tm.assert_index_equal(reindexed.items, pd.Index(["g", "c", "a", "d"])) - assert_almost_equal( - mgr.get("g", fastpath=False), reindexed.get("g", fastpath=False) - ) - assert_almost_equal( - mgr.get("c", fastpath=False), reindexed.get("c", fastpath=False) - ) - assert_almost_equal( - mgr.get("a", fastpath=False), reindexed.get("a", fastpath=False) - ) - assert_almost_equal( - mgr.get("d", fastpath=False), reindexed.get("d", fastpath=False) - ) assert_almost_equal( mgr.get("g").internal_values(), reindexed.get("g").internal_values() ) @@ -747,18 +733,12 @@ def test_get_numeric_data(self): tm.assert_index_equal( numeric.items, pd.Index(["int", "float", "complex", "bool"]) ) - assert_almost_equal( - mgr.get("float", fastpath=False), numeric.get("float", fastpath=False) - ) assert_almost_equal( mgr.get("float").internal_values(), numeric.get("float").internal_values() ) # Check sharing numeric.set("float", np.array([100.0, 200.0, 300.0])) - assert_almost_equal( - mgr.get("float", fastpath=False), np.array([100.0, 200.0, 300.0]) - ) assert_almost_equal( mgr.get("float").internal_values(), np.array([100.0, 200.0, 300.0]) ) @@ -768,9 +748,6 @@ def test_get_numeric_data(self): numeric.items, pd.Index(["int", "float", "complex", "bool"]) ) numeric2.set("float", np.array([1000.0, 2000.0, 3000.0])) - assert_almost_equal( - mgr.get("float", fastpath=False), np.array([100.0, 200.0, 300.0]) - ) assert_almost_equal( mgr.get("float").internal_values(), np.array([100.0, 200.0, 300.0]) ) @@ -785,17 +762,11 @@ def test_get_bool_data(self): bools = mgr.get_bool_data() tm.assert_index_equal(bools.items, pd.Index(["bool"])) - assert_almost_equal( - mgr.get("bool", fastpath=False), bools.get("bool", fastpath=False) - ) assert_almost_equal( mgr.get("bool").internal_values(), bools.get("bool").internal_values() ) bools.set("bool", np.array([True, False, True])) - tm.assert_numpy_array_equal( - mgr.get("bool", fastpath=False), np.array([True, False, True]) - ) tm.assert_numpy_array_equal( mgr.get("bool").internal_values(), np.array([True, False, True]) ) @@ -803,9 +774,6 @@ def test_get_bool_data(self): # Check sharing bools2 = mgr.get_bool_data(copy=True) bools2.set("bool", np.array([False, True, False])) - tm.assert_numpy_array_equal( - mgr.get("bool", fastpath=False), np.array([True, False, True]) - ) tm.assert_numpy_array_equal( mgr.get("bool").internal_values(), np.array([True, False, True]) )
This also cleans up some unfortunate formatting left by black. I _think_ we can get rid of the transpose kwarg in putmask as well; that can be a separate pass.
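The shape of the `where` change, in miniature (a toy class to show the simplified signature, not the real `Block`):

```python
class BlockSketch:
    # toy stand-in for internals.Block, just to show the simplified signature
    def __init__(self, ndim: int):
        self.ndim = ndim

    def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
        # previously threaded through by every caller as a keyword;
        # now derived from the block's own dimensionality
        transpose = self.ndim == 2
        return transpose


assert BlockSketch(2).where(None, None) is True
assert BlockSketch(1).where(None, None) is False
```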
https://api.github.com/repos/pandas-dev/pandas/pulls/27260
2019-07-06T03:26:49Z
2019-07-06T18:12:12Z
2019-07-06T18:12:12Z
2019-07-06T21:02:40Z
COMPAT: catch InvalidIndexError in base Indexer getitem
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 0bcaa83c49628..c30885291ffc9 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -24,7 +24,7 @@ from pandas.core.dtypes.missing import _infer_fill_value, isna import pandas.core.common as com -from pandas.core.index import Index, MultiIndex +from pandas.core.index import Index, InvalidIndexError, MultiIndex # the supported indexers @@ -118,7 +118,7 @@ def __getitem__(self, key): key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: values = self.obj._get_value(*key) - except (KeyError, TypeError): + except (KeyError, TypeError, InvalidIndexError): # TypeError occurs here if the key has non-hashable entries, # generally slice or list. # TODO(ix): most/all of the TypeError cases here are for ix,
Closes https://github.com/pandas-dev/pandas/issues/27258
https://api.github.com/repos/pandas-dev/pandas/pulls/27259
2019-07-06T02:11:11Z
2019-07-08T01:15:44Z
2019-07-08T01:15:43Z
2019-07-08T12:23:00Z
REF: ops.missing
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 973a022cfc3f1..d3837617d231a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -69,6 +69,7 @@ from pandas.core.indexes.frozen import FrozenList import pandas.core.missing as missing from pandas.core.ops import get_op_result_name, make_invalid_op +from pandas.core.ops.missing import dispatch_missing import pandas.core.sorting as sorting from pandas.core.strings import StringMethods @@ -154,7 +155,7 @@ def index_arithmetic_method(self, other): with np.errstate(all="ignore"): result = op(values, other) - result = missing.dispatch_missing(op, values, other, result) + result = dispatch_missing(op, values, other, result) attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index ad4b5e4523806..8f0abc91f7aef 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -1,8 +1,6 @@ """ Routines for filling missing data. """ -import operator - import numpy as np from pandas._libs import algos, lib @@ -13,7 +11,6 @@ ensure_float64, is_datetime64_dtype, is_datetime64tz_dtype, - is_float_dtype, is_integer, is_integer_dtype, is_numeric_v_string_like, @@ -578,141 +575,6 @@ def clean_reindex_fill_method(method): return clean_fill_method(method, allow_nearest=True) -def fill_zeros(result, x, y, name, fill): - """ - If this is a reversed op, then flip x,y - - If we have an integer value (or array in y) - and we have 0's, fill them with the fill, - return the result. - - Mask the nan's from x. - """ - if fill is None or is_float_dtype(result): - return result - - if name.startswith(("r", "__r")): - x, y = y, x - - is_variable_type = hasattr(y, "dtype") or hasattr(y, "type") - is_scalar_type = is_scalar(y) - - if not is_variable_type and not is_scalar_type: - return result - - if is_scalar_type: - y = np.array(y) - - if is_integer_dtype(y): - - if (y == 0).any(): - - # GH 7325, mask and nans must be broadcastable (also: PR 9308) - # Raveling and then reshaping makes np.putmask faster - mask = ((y == 0) & ~np.isnan(result)).ravel() - - shape = result.shape - result = result.astype("float64", copy=False).ravel() - - np.putmask(result, mask, fill) - - # if we have a fill of inf, then sign it correctly - # (GH 6178 and PR 9308) - if np.isinf(fill): - signs = y if name.startswith(("r", "__r")) else x - signs = np.sign(signs.astype("float", copy=False)) - negative_inf_mask = (signs.ravel() < 0) & mask - np.putmask(result, negative_inf_mask, -fill) - - if "floordiv" in name: # (PR 9308) - nan_mask = ((y == 0) & (x == 0)).ravel() - np.putmask(result, nan_mask, np.nan) - - result = result.reshape(shape) - - return result - - -def mask_zero_div_zero(x, y, result, copy=False): - """ - Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes - of the numerator or the denominator. - - Parameters - ---------- - x : ndarray - y : ndarray - result : ndarray - copy : bool (default False) - Whether to always create a new array or try to fill in the existing - array if possible. 
- - Returns - ------- - filled_result : ndarray - - Examples - -------- - >>> x = np.array([1, 0, -1], dtype=np.int64) - >>> y = 0 # int 0; numpy behavior is different with float - >>> result = x / y - >>> result # raw numpy result does not fill division by zero - array([0, 0, 0]) - >>> mask_zero_div_zero(x, y, result) - array([ inf, nan, -inf]) - """ - if is_scalar(y): - y = np.array(y) - - zmask = y == 0 - if zmask.any(): - shape = result.shape - - nan_mask = (zmask & (x == 0)).ravel() - neginf_mask = (zmask & (x < 0)).ravel() - posinf_mask = (zmask & (x > 0)).ravel() - - if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): - # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN - result = result.astype("float64", copy=copy).ravel() - - np.putmask(result, nan_mask, np.nan) - np.putmask(result, posinf_mask, np.inf) - np.putmask(result, neginf_mask, -np.inf) - - result = result.reshape(shape) - - return result - - -def dispatch_missing(op, left, right, result): - """ - Fill nulls caused by division by zero, casting to a different dtype - if necessary. - - Parameters - ---------- - op : function (operator.add, operator.div, ...) - left : object (Index for non-reversed ops) - right : object (Index fof reversed ops) - result : ndarray - - Returns - ------- - result : ndarray - """ - opstr = "__{opname}__".format(opname=op.__name__).replace("____", "__") - if op in [operator.truediv, operator.floordiv, getattr(operator, "div", None)]: - result = mask_zero_div_zero(left, right, result) - elif op is operator.mod: - result = fill_zeros(result, left, right, opstr, np.nan) - elif op is divmod: - res0 = mask_zero_div_zero(left, right, result[0]) - res1 = fill_zeros(result[1], left, right, opstr, np.nan) - result = (res0, res1) - return result - - def _interp_limit(invalid, fw_limit, bw_limit): """ Get indexers of values that won't be filled diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 4692ec45df0ad..3ce6da6891a7f 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -49,8 +49,8 @@ import pandas as pd from pandas._typing import ArrayLike import pandas.core.common as com -import pandas.core.missing as missing +from . import missing from .roperator import ( # noqa:F401 radd, rand_, diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py new file mode 100644 index 0000000000000..947dfc68ac7c3 --- /dev/null +++ b/pandas/core/ops/missing.py @@ -0,0 +1,165 @@ +""" +Missing data handling for arithmetic operations. + +In particular, pandas conventions regarding division by zero differ +from numpy in the following ways: + 1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2) + gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for + the remaining pairs + (the remaining being dtype1==dtype2==intN and dtype1==dtype2==uintN). + + pandas convention is to return [-inf, nan, inf] for all dtype + combinations. + + Note: the numpy behavior described here is py3-specific. + + 2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2) + gives precisely the same results as the // operation. + + pandas convention is to return [nan, nan, nan] for all dtype + combinations. + + 3) divmod behavior consistent with 1) and 2).
+""" +import operator + +import numpy as np + +from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_scalar + + +def fill_zeros(result, x, y, name, fill): + """ + If this is a reversed op, then flip x,y + + If we have an integer value (or array in y) + and we have 0's, fill them with the fill, + return the result. + + Mask the nan's from x. + """ + if fill is None or is_float_dtype(result): + return result + + if name.startswith(("r", "__r")): + x, y = y, x + + is_variable_type = hasattr(y, "dtype") or hasattr(y, "type") + is_scalar_type = is_scalar(y) + + if not is_variable_type and not is_scalar_type: + return result + + if is_scalar_type: + y = np.array(y) + + if is_integer_dtype(y): + + if (y == 0).any(): + + # GH#7325, mask and nans must be broadcastable (also: GH#9308) + # Raveling and then reshaping makes np.putmask faster + mask = ((y == 0) & ~np.isnan(result)).ravel() + + shape = result.shape + result = result.astype("float64", copy=False).ravel() + + np.putmask(result, mask, fill) + + # if we have a fill of inf, then sign it correctly + # (GH#6178 and GH#9308) + if np.isinf(fill): + signs = y if name.startswith(("r", "__r")) else x + signs = np.sign(signs.astype("float", copy=False)) + negative_inf_mask = (signs.ravel() < 0) & mask + np.putmask(result, negative_inf_mask, -fill) + + if "floordiv" in name: # (GH#9308) + nan_mask = ((y == 0) & (x == 0)).ravel() + np.putmask(result, nan_mask, np.nan) + + result = result.reshape(shape) + + return result + + +def mask_zero_div_zero(x, y, result, copy=False): + """ + Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes + of the numerator or the denominator. + + Parameters + ---------- + x : ndarray + y : ndarray + result : ndarray + copy : bool (default False) + Whether to always create a new array or try to fill in the existing + array if possible. + + Returns + ------- + filled_result : ndarray + + Examples + -------- + >>> x = np.array([1, 0, -1], dtype=np.int64) + >>> y = 0 # int 0; numpy behavior is different with float + >>> result = x / y + >>> result # raw numpy result does not fill division by zero + array([0, 0, 0]) + >>> mask_zero_div_zero(x, y, result) + array([ inf, nan, -inf]) + """ + if is_scalar(y): + y = np.array(y) + + zmask = y == 0 + if zmask.any(): + shape = result.shape + + nan_mask = (zmask & (x == 0)).ravel() + neginf_mask = (zmask & (x < 0)).ravel() + posinf_mask = (zmask & (x > 0)).ravel() + + if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): + # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN + result = result.astype("float64", copy=copy).ravel() + + np.putmask(result, nan_mask, np.nan) + np.putmask(result, posinf_mask, np.inf) + np.putmask(result, neginf_mask, -np.inf) + + result = result.reshape(shape) + + return result + + +def dispatch_missing(op, left, right, result): + """ + Fill nulls caused by division by zero, casting to a different dtype + if necessary. + + Parameters + ---------- + op : function (operator.add, operator.div, ...) + left : object (Index for non-reversed ops) + right : object (Index fof reversed ops) + result : ndarray + + Returns + ------- + result : ndarray + """ + opstr = "__{opname}__".format(opname=op.__name__).replace("____", "__") + if op is operator.floordiv: + # Note: no need to do this for truediv; in py3 numpy behaves the way + # we want. 
+ result = mask_zero_div_zero(left, right, result) + elif op is operator.mod: + result = fill_zeros(result, left, right, opstr, np.nan) + elif op is divmod: + res0 = mask_zero_div_zero(left, right, result[0]) + res1 = fill_zeros(result[1], left, right, opstr, np.nan) + result = (res0, res1) + return result diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index f46fbcdb504e9..a18f8380f80c1 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1063,8 +1063,7 @@ def test_replace_series_datetime_tz(self): # TODO(jreback) commented out to only have a single xfail printed @pytest.mark.xfail( - reason="different tz, " "currently mask_missing raises SystemError", - strict=False, + reason="different tz, currently mask_missing raises SystemError", strict=False ) # @pytest.mark.parametrize('how', ['dict', 'series']) # @pytest.mark.parametrize('to_key', [
- Move arithmetic-centric functions from core.missing to core.ops.missing. - Add a module-level docstring giving the motivation for these functions. - A tiny optimization: ``` if op in [operator.floordiv, operator.truediv, getattr(operator, "div", None)]: result = mask_zero_div_zero(left, right, result) ``` becomes ``` if op is operator.floordiv: result = mask_zero_div_zero(left, right, result) ``` because in py3 numpy already behaves the way we want for truediv.
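The conventions these helpers enforce, checked from the top-level API (a quick sketch based on the new module docstring; reprs abbreviated):

```python
import pandas as pd

idx = pd.Index([-1, 0, 1])

idx // 0        # Float64Index([-inf, nan, inf]) -- via mask_zero_div_zero
idx % 0         # Float64Index([nan, nan, nan])  -- via fill_zeros
divmod(idx, 0)  # both of the above, via the divmod branch
```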
https://api.github.com/repos/pandas-dev/pandas/pulls/27257
2019-07-06T01:51:02Z
2019-07-06T17:43:37Z
2019-07-06T17:43:37Z
2019-07-06T21:03:18Z
TST: add tests to validate margin results for pivot (#25815)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index b497f6c3aa9b4..4e2302472b294 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -286,6 +286,32 @@ def test_pivot_with_interval_index(self, interval_values, dropna): expected = DataFrame({"B": 1}, index=Index(interval_values.unique(), name="A")) tm.assert_frame_equal(result, expected) + def test_pivot_with_interval_index_margins(self): + # GH 25815 + ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2]) + df = DataFrame( + { + "A": np.arange(4, 0, -1, dtype=np.intp), + "B": ["a", "b", "a", "b"], + "C": pd.Categorical(ordered_cat, ordered=True).sort_values( + ascending=False + ), + } + ) + + pivot_tab = pd.pivot_table( + df, index="C", columns="B", values="A", aggfunc="sum", margins=True + ) + + result = pivot_tab["All"] + expected = Series( + [3, 7, 10], + index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"), + name="All", + dtype=np.intp, + ) + tm.assert_series_equal(result, expected) + def test_pass_array(self): result = self.data.pivot_table("D", index=self.data.A, columns=self.data.C) expected = self.data.pivot_table("D", index="A", columns="C")
- [x] closes #25815 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
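A condensed version of what the test exercises (adapted from the test data):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "A": np.arange(4, 0, -1),
        "B": ["a", "b", "a", "b"],
        "C": pd.Categorical(
            pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2]), ordered=True
        ),
    }
)
# margins=True adds the "All" row/column; this previously raised with an
# interval-valued categorical index (GH 25815)
pd.pivot_table(df, index="C", columns="B", values="A", aggfunc="sum", margins=True)
```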
https://api.github.com/repos/pandas-dev/pandas/pulls/27245
2019-07-05T08:27:48Z
2019-07-09T21:29:23Z
2019-07-09T21:29:23Z
2023-01-31T05:15:51Z
BUG: merge_asof with multiple by columns with tz
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index ab242ece98181..101addfa097f8 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1151,6 +1151,7 @@ Reshaping - Bug in :func:`DataFrame.pivot_table` with a :class:`IntervalIndex` as pivot index would raise ``TypeError`` (:issue:`25814`) - Bug in :meth:`DataFrame.transpose` where transposing a DataFrame with a timezone-aware datetime column would incorrectly raise ``ValueError`` (:issue:`26825`) - Bug in :func:`pivot_table` when pivoting a timezone aware column as the ``values`` would remove timezone information (:issue:`14948`) +- Bug in :func:`merge_asof` when specifying multiple ``by`` columns where one is ``datetime64[ns, tz]`` dtype (:issue:`26649`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4f910f6a278ad..c1a07c129f7cd 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1686,6 +1686,9 @@ def _get_join_indexers(self): def flip(xs): """ unlike np.transpose, this returns an array of tuples """ + xs = [ + x if not is_extension_array_dtype(x) else x._ndarray_values for x in xs + ] labels = list(string.ascii_lowercase[: len(xs)]) dtypes = [x.dtype for x in xs] labeled_dtypes = list(zip(labels, dtypes)) diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index e2e17397464fe..6b66386bafc5e 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -190,9 +190,9 @@ def test_basic_left_index(self): result = merge_asof( trades, quotes, left_index=True, right_on="time", by="ticker" ) - # left-only index uses right's index, oddly + # left-only index uses right"s index, oddly expected.index = result.index - # time column appears after left's columns + # time column appears after left"s columns expected = expected[result.columns] assert_frame_equal(result, expected) @@ -233,7 +233,7 @@ def test_multi_index(self): def test_on_and_index(self): - # 'on' parameter and index together is prohibited + # "on" parameter and index together is prohibited trades = self.trades.set_index("time") quotes = self.quotes.set_index("time") with pytest.raises(MergeError): @@ -1220,3 +1220,29 @@ def test_merge_by_col_tz_aware(self): columns=["by_col", "on_col", "values_x", "values_y"], ) assert_frame_equal(result, expected) + + def test_by_mixed_tz_aware(self): + # GH 26649 + left = pd.DataFrame( + { + "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "by_col2": ["HELLO"], + "on_col": [2], + "value": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "by_col2": ["WORLD"], + "on_col": [1], + "value": ["b"], + } + ) + result = pd.merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col") + expected = pd.DataFrame( + [[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]], + columns=["by_col1", "by_col2", "on_col", "value_x"], + ) + expected["value_y"] = np.array([np.nan], dtype=object) + assert_frame_equal(result, expected)
- [x] closes #26649 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
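A condensed repro of the fixed case, adapted from the new test:

```python
import pandas as pd

left = pd.DataFrame(
    {
        "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
        "by_col2": ["HELLO"],
        "on_col": [2],
        "value": ["a"],
    }
)
right = left.assign(by_col2=["WORLD"], on_col=[1], value=["b"])
# multiple "by" columns where one is tz-aware datetime no longer raises
pd.merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
```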
https://api.github.com/repos/pandas-dev/pandas/pulls/27243
2019-07-05T07:14:18Z
2019-07-05T14:29:35Z
2019-07-05T14:29:35Z
2020-03-14T02:08:52Z
Separate MultiIndex names from levels
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 1112e42489342..17833b70b930f 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -123,7 +123,37 @@ source, you should no longer need to install Cython into your build environment Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- :class:`pandas.core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`). +.. _whatsnew_1000.api_breaking.MultiIndex._names: + +``MultiIndex.levels`` do not hold level names any longer +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- A :class:`MultiIndex` previously stored the level names as attributes of each of its + :attr:`MultiIndex.levels`. From pandas 1.0, the names are only accessed through + :attr:`MultiIndex.names` (which was also possible previously). This is done in order to + make :attr:`MultiIndex.levels` more similar to :attr:`CategoricalIndex.categories` (:issue:`27242`). + +*pandas 0.25.x* + +.. code-block:: ipython + + In [1]: mi = pd.MultiIndex.from_product([[1, 2], ['a', 'b']], names=['x', 'y']) + In [2]: mi + Out[2]: MultiIndex([(1, 'a'), + (1, 'b'), + (2, 'a'), + (2, 'b')], + names=['x', 'y']) + In [3]: mi.levels[0].name + Out[3]: 'x' + +*pandas 1.0.0* + +.. ipython:: python + + mi = pd.MultiIndex.from_product([[1, 2], ['a', 'b']], names=['x', 'y']) + mi.levels[0].name + - :class:`pandas.core.arrays.IntervalArray` adopts a new ``__repr__`` in accordance with other array classes (:issue:`25022`) *pandas 0.25.x* @@ -149,6 +179,7 @@ Backwards incompatible API changes Other API changes ^^^^^^^^^^^^^^^^^ +- :class:`pandas.core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`) - :meth:`pandas.api.types.infer_dtype` will now return "integer-na" for integer and ``np.nan`` mix (:issue:`27283`) - :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`) - In order to improve tab-completion, Pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 79e941f262931..28bafd9c300be 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7772,7 +7772,8 @@ def _count_level(self, level, axis=0, numeric_only=False): if isinstance(level, str): level = count_axis._get_level_number(level) - level_index = count_axis.levels[level] + level_name = count_axis._names[level] + level_index = count_axis.levels[level]._shallow_copy(name=level_name) level_codes = ensure_int64(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=0) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 596eaf0c55dbd..b0a1ed0650f7c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -274,6 +274,7 @@ def __new__( result._set_levels(levels, copy=copy, validate=False) result._set_codes(codes, copy=copy, validate=False) + result._names = [None] * len(levels) if names is not None: # handles name validation result._set_names(names) @@ -1216,7 +1217,7 @@ def __len__(self): return len(self.codes[0]) def _get_names(self): - return FrozenList(level.name for level in self.levels) + return FrozenList(self._names) def _set_names(self, names, level=None, validate=True): """ @@ -1262,7 +1263,7 @@ def _set_names(self, names, level=None, validate=True): level = [self._get_level_number(l) for l in level] # set the name - for l, name in zip(level, names): + for lev, name in zip(level, names): if name is not None: # GH 20527 # All items in 'names' need to be hashable: @@ -1272,7 +1273,7 @@ def _set_names(self, names, level=None, validate=True): self.__class__.__name__ ) ) - self.levels[l].rename(name, inplace=True) + self._names[lev] = name names = property( fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n""" @@ -1582,13 +1583,13 @@ def _get_level_values(self, level, unique=False): values : ndarray """ - values = self.levels[level] + lev = self.levels[level] level_codes = self.codes[level] + name = self._names[level] if unique: level_codes = algos.unique(level_codes) - filled = algos.take_1d(values._values, level_codes, fill_value=values._na_value) - values = values._shallow_copy(filled) - return values + filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value) + return lev._shallow_copy(filled, name=name) def get_level_values(self, level): """ diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index e654685d24d9d..340e964d7c14f 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -259,10 +259,10 @@ def get_new_values(self): def get_new_columns(self): if self.value_columns is None: if self.lift == 0: - return self.removed_level + return self.removed_level._shallow_copy(name=self.removed_name) - lev = self.removed_level - return lev.insert(0, lev._na_value) + lev = self.removed_level.insert(0, item=self.removed_level._na_value) + return lev.rename(self.removed_name) stride = len(self.removed_level) + self.lift width = len(self.value_columns) @@ -298,10 +298,10 @@ def get_new_index(self): # construct the new index if len(self.new_index_levels) == 1: - lev, lab = self.new_index_levels[0], result_codes[0] - if (lab == -1).any(): - lev = lev.insert(len(lev), lev._na_value) - return lev.take(lab) + level, level_codes = self.new_index_levels[0], result_codes[0] + if (level_codes == -1).any(): + level = level.insert(len(level), level._na_value) + return level.take(level_codes).rename(self.new_index_names[0]) return MultiIndex( 
levels=self.new_index_levels, @@ -661,7 +661,8 @@ def _convert_level_number(level_num, columns): new_names = this.columns.names[:-1] new_columns = MultiIndex.from_tuples(unique_groups, names=new_names) else: - new_columns = unique_groups = this.columns.levels[0] + new_columns = this.columns.levels[0]._shallow_copy(name=this.columns.names[0]) + unique_groups = new_columns # time to ravel the values new_data = {} diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 9016e8a98e5ba..1e27421a55499 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -243,8 +243,10 @@ def build_table_schema(data, index=True, primary_key=None, version=True): if index: if data.index.nlevels > 1: - for level in data.index.levels: - fields.append(convert_pandas_type_to_json_field(level)) + for level, name in zip(data.index.levels, data.index.names): + new_field = convert_pandas_type_to_json_field(level) + new_field["name"] = name + fields.append(new_field) else: fields.append(convert_pandas_type_to_json_field(data.index)) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 017cbea7ec723..b310335be5f65 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -978,7 +978,7 @@ def test_reset_index(self, float_frame): ): values = lev.take(level_codes) name = names[i] - tm.assert_index_equal(values, Index(deleveled[name])) + tm.assert_index_equal(values, Index(deleveled[name].rename(name=None))) stacked.index.names = [None, None] deleveled2 = stacked.reset_index() diff --git a/pandas/tests/indexes/multi/test_astype.py b/pandas/tests/indexes/multi/test_astype.py index 4adcdd0112b26..f320a89c471bf 100644 --- a/pandas/tests/indexes/multi/test_astype.py +++ b/pandas/tests/indexes/multi/test_astype.py @@ -11,7 +11,7 @@ def test_astype(idx): actual = idx.astype("O") assert_copy(actual.levels, expected.levels) assert_copy(actual.codes, expected.codes) - assert [level.name for level in actual.levels] == list(expected.names) + assert actual.names == list(expected.names) with pytest.raises(TypeError, match="^Setting.*dtype.*object"): idx.astype(np.dtype(int)) diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py index 9472d539537ba..993979f31a35b 100644 --- a/pandas/tests/indexes/multi/test_constructor.py +++ b/pandas/tests/indexes/multi/test_constructor.py @@ -17,7 +17,7 @@ def test_constructor_single_level(): levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"] ) assert isinstance(result, MultiIndex) - expected = Index(["foo", "bar", "baz", "qux"], name="first") + expected = Index(["foo", "bar", "baz", "qux"]) tm.assert_index_equal(result.levels[0], expected) assert result.names == ["first"] @@ -292,8 +292,9 @@ def test_from_arrays_empty(): # 1 level result = MultiIndex.from_arrays(arrays=[[]], names=["A"]) assert isinstance(result, MultiIndex) - expected = Index([], name="A") + expected = Index([]) tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] # N levels for N in [2, 3]: @@ -439,8 +440,9 @@ def test_from_product_empty_zero_levels(): def test_from_product_empty_one_level(): result = MultiIndex.from_product([[]], names=["A"]) - expected = pd.Index([], name="A") + expected = pd.Index([]) tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] @pytest.mark.parametrize( diff --git a/pandas/tests/indexes/multi/test_names.py 
b/pandas/tests/indexes/multi/test_names.py index 5856cb56b307b..679e045a68f29 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -27,28 +27,25 @@ def test_index_name_retained(): def test_changing_names(idx): - - # names should be applied to levels - level_names = [level.name for level in idx.levels] - check_level_names(idx, idx.names) + assert [level.name for level in idx.levels] == [None, None] view = idx.view() copy = idx.copy() shallow_copy = idx._shallow_copy() - # changing names should change level names on object + # changing names should not change level names on object new_names = [name + "a" for name in idx.names] idx.names = new_names - check_level_names(idx, new_names) + check_level_names(idx, [None, None]) - # but not on copies - check_level_names(view, level_names) - check_level_names(copy, level_names) - check_level_names(shallow_copy, level_names) + # and not on copies + check_level_names(view, [None, None]) + check_level_names(copy, [None, None]) + check_level_names(shallow_copy, [None, None]) # and copies shouldn't change original shallow_copy.names = [name + "c" for name in shallow_copy.names] - check_level_names(idx, new_names) + check_level_names(idx, [None, None]) def test_take_preserve_name(idx): @@ -82,9 +79,9 @@ def test_copy_names(): def test_names(idx, index_names): # names are assigned in setup - names = index_names + assert index_names == ["first", "second"] level_names = [level.name for level in idx.levels] - assert names == level_names + assert level_names == [None, None] # setting bad names on existing index = idx @@ -109,11 +106,10 @@ def test_names(idx, index_names): names=["first", "second", "third"], ) - # names are assigned + # names are assigned on index, but not transferred to the levels index.names = ["a", "b"] - ind_names = list(index.names) level_names = [level.name for level in index.levels] - assert ind_names == level_names + assert level_names == [None, None] def test_duplicate_level_names_access_raises(idx): diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py index 88de4d1e80386..970288e5747c7 100644 --- a/pandas/tests/indexes/multi/test_reindex.py +++ b/pandas/tests/indexes/multi/test_reindex.py @@ -6,19 +6,17 @@ import pandas.util.testing as tm -def check_level_names(index, names): - assert [level.name for level in index.levels] == list(names) - - def test_reindex(idx): result, indexer = idx.reindex(list(idx[:4])) assert isinstance(result, MultiIndex) - check_level_names(result, idx[:4].names) + assert result.names == ["first", "second"] + assert [level.name for level in result.levels] == [None, None] result, indexer = idx.reindex(list(idx)) assert isinstance(result, MultiIndex) assert indexer is None - check_level_names(result, idx.names) + assert result.names == ["first", "second"] + assert [level.name for level in result.levels] == [None, None] def test_reindex_level(idx): diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py index a30e6f33d1499..e79f212f30078 100644 --- a/pandas/tests/indexes/multi/test_reshape.py +++ b/pandas/tests/indexes/multi/test_reshape.py @@ -15,10 +15,11 @@ def test_insert(idx): # key not contained in all levels new_index = idx.insert(0, ("abc", "three")) - exp0 = Index(list(idx.levels[0]) + ["abc"], name="first") + exp0 = Index(list(idx.levels[0]) + ["abc"]) tm.assert_index_equal(new_index.levels[0], exp0) + assert new_index.names == ["first", "second"] - exp1 = 
Index(list(idx.levels[1]) + ["three"], name="second") + exp1 = Index(list(idx.levels[1]) + ["three"]) tm.assert_index_equal(new_index.levels[1], exp1) assert new_index[0] == ("abc", "three") diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 13f0f14014a31..33cbaaed1848d 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1219,8 +1219,10 @@ def test_concat_keys_specific_levels(self): names=["group_key"], ) - tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key")) - assert result.columns.names[0] == "group_key" + tm.assert_index_equal(result.columns.levels[0], Index(level)) + tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3])) + + assert result.columns.names == ["group_key", None] def test_concat_dataframe_keys_bug(self, sort): t1 = DataFrame( @@ -1409,10 +1411,8 @@ def test_concat_keys_and_levels(self): keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], names=["first", "second"], ) - assert result.index.names == ("first", "second") + (None,) - tm.assert_index_equal( - result.index.levels[0], Index(["baz", "foo"], name="first") - ) + assert result.index.names == ("first", "second", None) + tm.assert_index_equal(result.index.levels[0], Index(["baz", "foo"])) def test_concat_keys_levels_no_overlap(self): # GH #1406 diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index e2c6f7d1c8feb..0b9392a0eeb5b 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -618,16 +618,15 @@ def test_reshaping_multi_index_categorical(self): df.index.names = ["major", "minor"] df["str"] = "foo" - dti = df.index.levels[0] - df["category"] = df["str"].astype("category") result = df["category"].unstack() + dti = df.index.levels[0] c = Categorical(["foo"] * len(dti)) expected = DataFrame( {"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()}, columns=Index(list("ABCD"), name="minor"), - index=dti, + index=dti.rename("major"), ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index e641d6f842d87..76436f4480809 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -335,7 +335,7 @@ def test_count_level_corner(self): df = self.frame[:0] result = df.count(level=0) expected = ( - DataFrame(index=s.index.levels[0], columns=df.columns) + DataFrame(index=s.index.levels[0].set_names(["first"]), columns=df.columns) .fillna(0) .astype(np.int64) ) @@ -976,13 +976,11 @@ def test_count(self): result = series.count(level="b") expect = self.series.count(level=1) - tm.assert_series_equal(result, expect, check_names=False) - assert result.index.name == "b" + tm.assert_series_equal(result, expect) result = series.count(level="a") expect = self.series.count(level=0) - tm.assert_series_equal(result, expect, check_names=False) - assert result.index.name == "a" + tm.assert_series_equal(result, expect) msg = "Level x not found" with pytest.raises(KeyError, match=msg): @@ -1014,6 +1012,8 @@ def test_frame_group_ops(self, op, level, axis, skipna, sort): self.frame.iloc[1, [1, 2]] = np.nan self.frame.iloc[7, [0, 1]] = np.nan + level_name = self.frame.index.names[level] + if axis == 0: frame = self.frame else: @@ -1034,7 +1034,7 @@ def aggf(x): frame = frame.sort_index(level=level, axis=axis) # for good measure, groupby detail - level_index = frame._get_axis(axis).levels[level] + level_index = 
frame._get_axis(axis).levels[level].rename(level_name) tm.assert_index_equal(leftside._get_axis(axis), level_index) tm.assert_index_equal(rightside._get_axis(axis), level_index) @@ -1639,12 +1639,18 @@ def test_constructor_with_tz(self): ) result = MultiIndex.from_arrays([index, columns]) - tm.assert_index_equal(result.levels[0], index) - tm.assert_index_equal(result.levels[1], columns) + + assert result.names == ["dt1", "dt2"] + # levels don't have names set, so set name of index/columns to None in checks + tm.assert_index_equal(result.levels[0], index.rename(name=None)) + tm.assert_index_equal(result.levels[1], columns.rename(name=None)) result = MultiIndex.from_arrays([Series(index), Series(columns)]) - tm.assert_index_equal(result.levels[0], index) - tm.assert_index_equal(result.levels[1], columns) + + assert result.names == ["dt1", "dt2"] + # levels don't have names set, so set name of index/columns to None in checks + tm.assert_index_equal(result.levels[0], index.rename(name=None)) + tm.assert_index_equal(result.levels[1], columns.rename(name=None)) def test_set_index_datetime(self): # GH 3950 @@ -1666,18 +1672,19 @@ def test_set_index_datetime(self): df.index = df.index.tz_convert("US/Pacific") expected = pd.DatetimeIndex( - ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], - name="datetime", + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"] ) expected = expected.tz_localize("UTC").tz_convert("US/Pacific") df = df.set_index("label", append=True) tm.assert_index_equal(df.index.levels[0], expected) - tm.assert_index_equal(df.index.levels[1], Index(["a", "b"], name="label")) + tm.assert_index_equal(df.index.levels[1], Index(["a", "b"])) + assert df.index.names == ["datetime", "label"] df = df.swaplevel(0, 1) - tm.assert_index_equal(df.index.levels[0], Index(["a", "b"], name="label")) + tm.assert_index_equal(df.index.levels[0], Index(["a", "b"])) tm.assert_index_equal(df.index.levels[1], expected) + assert df.index.names == ["label", "datetime"] df = DataFrame(np.random.random(6)) idx1 = pd.DatetimeIndex(
- [x] progress towards #27138 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry In #27138 I proposed some changes to ``MultiIndex`` so that the index type can have its data collected in ``_data`` as type ``List[Categorical]``, plus adding ``MultiIndex.arrays`` in order to access each full level as a zero-copy ``Categorical``. This is the first part of that proposal: it drops setting the names on the ``levels[x].name`` attribute and instead sets the names on the ``MultiIndex._names`` attribute. This PR is a minor backwards-incompatible change (so it would be good to get into 0.25), while the followup will not break anything.
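A minimal sketch of the behavioral change, based on the updated tests in this diff (the level values and names here are only illustrative):

```python
import pandas as pd

# Construct a MultiIndex with names passed to the constructor.
mi = pd.MultiIndex.from_arrays([["a", "a"], [1, 2]], names=["first", "second"])

# After this change the names live on the MultiIndex itself...
print(mi.names)  # FrozenList(['first', 'second'])

# ...while the individual levels stay unnamed.
print([level.name for level in mi.levels])  # [None, None]
```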
https://api.github.com/repos/pandas-dev/pandas/pulls/27242
2019-07-05T04:47:17Z
2019-10-16T14:40:59Z
2019-10-16T14:40:59Z
2019-10-17T18:44:56Z
BUG: Fix divmod fill value, closes #26987
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 241e445bf6686..9f59be73e501c 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1007,7 +1007,7 @@ Numeric - Raises a helpful exception when a non-numeric index is sent to :meth:`interpolate` with methods which require numeric index. (:issue:`21662`) - Bug in :meth:`~pandas.eval` when comparing floats with scalar operators, for example: ``x < -0.1`` (:issue:`25928`) - Fixed bug where casting all-boolean array to integer extension array failed (:issue:`25211`) -- +- Bug in ``divmod`` with a :class:`Series` object containing zeros incorrectly raising ``AttributeError`` (:issue:`26987`) - Conversion diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 8690e1974330b..6234bc0f7bd35 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -56,7 +56,6 @@ Timezones Numeric ^^^^^^^ - - - - diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 3ce6da6891a7f..df2907bf591dd 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -249,7 +249,7 @@ def _gen_fill_zeros(name): """ name = name.strip("__") if "div" in name: - # truediv, floordiv, div, and reversed variants + # truediv, floordiv, and reversed variants fill_value = np.inf elif "mod" in name: # mod, rmod @@ -1668,14 +1668,7 @@ def na_op(x, y): except TypeError: result = masked_arith_op(x, y, op) - if isinstance(result, tuple): - # e.g. divmod - result = tuple( - missing.fill_zeros(r, x, y, op_name, fill_zeros) for r in result - ) - else: - result = missing.fill_zeros(result, x, y, op_name, fill_zeros) - return result + return missing.dispatch_fill_zeros(op, x, y, result, fill_zeros) def wrapper(left, right): if isinstance(right, ABCDataFrame): @@ -2157,8 +2150,7 @@ def na_op(x, y): except TypeError: result = masked_arith_op(x, y, op) - result = missing.fill_zeros(result, x, y, op_name, fill_zeros) - return result + return missing.dispatch_fill_zeros(op, x, y, result, fill_zeros) if op_name in _op_descriptions: # i.e. include "add" but not "__add__" diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 947dfc68ac7c3..4ca1861baf237 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -27,6 +27,8 @@ from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_scalar +from .roperator import rdivmod + def fill_zeros(result, x, y, name, fill): """ @@ -163,3 +165,24 @@ def dispatch_missing(op, left, right, result): res1 = fill_zeros(result[1], left, right, opstr, np.nan) result = (res0, res1) return result + + +# FIXME: de-duplicate with dispatch_missing +def dispatch_fill_zeros(op, left, right, result, fill_value): + """ + Call fill_zeros with the appropriate fill value depending on the operation, + with special logic for divmod and rdivmod. 
+ """ + if op is divmod: + result = ( + fill_zeros(result[0], left, right, "__floordiv__", np.inf), + fill_zeros(result[1], left, right, "__mod__", np.nan), + ) + elif op is rdivmod: + result = ( + fill_zeros(result[0], left, right, "__rfloordiv__", np.inf), + fill_zeros(result[1], left, right, "__rmod__", np.nan), + ) + else: + result = fill_zeros(result, left, right, op.__name__, fill_value) + return result diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 7dcd0cc820061..f582bf8b13975 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -265,25 +265,11 @@ def test_divmod_zero(self, zero, numeric_idx): # ------------------------------------------------------------------ - @pytest.mark.parametrize( - "dtype2", - [ - np.int64, - np.int32, - np.int16, - np.int8, - np.float64, - np.float32, - np.float16, - np.uint64, - np.uint32, - np.uint16, - np.uint8, - ], - ) @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) - def test_ser_div_ser(self, dtype1, dtype2): + def test_ser_div_ser(self, dtype1, any_real_dtype): # no longer do integer div for any ops, but deal with the 0's + dtype2 = any_real_dtype + first = Series([3, 4, 5, 8], name="first").astype(dtype1) second = Series([0, 0, 0, 3], name="second").astype(dtype2) @@ -299,6 +285,39 @@ def test_ser_div_ser(self, dtype1, dtype2): tm.assert_series_equal(result, expected) assert not result.equals(second / first) + @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) + def test_ser_divmod_zero(self, dtype1, any_real_dtype): + # GH#26987 + dtype2 = any_real_dtype + left = pd.Series([1, 1]).astype(dtype1) + right = pd.Series([0, 2]).astype(dtype2) + + expected = left // right, left % right + result = divmod(left, right) + + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + # rdivmod case + result = divmod(left.values, right) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + def test_ser_divmod_inf(self): + left = pd.Series([np.inf, 1.0]) + right = pd.Series([np.inf, 2.0]) + + expected = left // right, left % right + result = divmod(left, right) + + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + # rdivmod case + result = divmod(left.values, right) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + def test_rdiv_zero_compat(self): # GH#8674 zero_array = np.array([0] * 5) @@ -662,7 +681,9 @@ def test_modulo2(self): result2 = p["second"] % p["first"] assert not result.equals(result2) - # GH#9144 + def test_modulo_zero_int(self): + # GH#9144 + with np.errstate(all="ignore"): s = Series([0, 1]) result = s % 0 diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 8895544958d7a..5619a0a11fb11 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -578,6 +578,7 @@ def check(a, b): _check_op(a, b, lambda x, y: operator.floordiv(y, x)) _check_op(a, b, lambda x, y: operator.mul(y, x)) + # FIXME: don't leave commented-out # NaN ** 0 = 1 in C? # _check_op(a, b, operator.pow) # _check_op(a, b, lambda x, y: operator.pow(y, x))
- [x] closes #26987 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Takes over from #27130 by implementing ``dispatch_fill_zeros``. This is unfortunately similar to ``dispatch_missing``, and eventually the two functions will be merged. At the moment I'm trying to do exactly that in the branch for #27130, but lots of trouble keeps popping up with sparse and IntegerArray.
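A minimal reproduction of the fixed behavior, mirroring the ``test_ser_divmod_zero`` case added in this diff:

```python
import pandas as pd

left = pd.Series([1, 1])
right = pd.Series([0, 2])

# Before this fix, divmod on a Series containing zeros raised AttributeError
# (GH#26987). With the fix, it matches the element-wise operators, using the
# usual zero-division fill values (inf for //, nan for %).
quotient, remainder = divmod(left, right)

assert quotient.equals(left // right)
assert remainder.equals(left % right)
```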
https://api.github.com/repos/pandas-dev/pandas/pulls/27239
2019-07-05T01:25:34Z
2019-07-06T21:45:47Z
2019-07-06T21:45:47Z
2019-07-06T22:02:24Z
TST/REF: misplaced frame.indexing tests
diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index c876f78176e2e..e37d00c540974 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -352,14 +352,6 @@ def test_assigning_ops(self): df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"]) tm.assert_frame_equal(df, exp) - def test_functions_no_warnings(self): - df = DataFrame({"value": np.random.randint(0, 100, 20)}) - labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)] - with tm.assert_produces_warning(False): - df["group"] = pd.cut( - df.value, range(0, 105, 10), right=False, labels=labels - ) - def test_setitem_single_row_categorical(self): # GH 25495 df = DataFrame({"Alpha": ["a"], "Numeric": [0]}) @@ -394,14 +386,3 @@ def test_loc_indexing_preserves_index_category_dtype(self): result = df.loc[["a"]].index.levels[0] tm.assert_index_equal(result, expected) - - def test_categorical_filtering(self): - # GH22609 Verify filtering operations on DataFrames with categorical Series - df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) - df["b"] = df.b.astype("category") - - result = df.where(df.a > 0) - expected = df.copy() - expected.loc[0, :] = np.nan - - tm.assert_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_datetime.py b/pandas/tests/frame/indexing/test_datetime.py deleted file mode 100644 index 1866ac341def6..0000000000000 --- a/pandas/tests/frame/indexing/test_datetime.py +++ /dev/null @@ -1,48 +0,0 @@ -import pandas as pd -from pandas import DataFrame, Index, Series, date_range, notna -import pandas._testing as tm - - -class TestDataFrameIndexingDatetimeWithTZ: - def test_setitem(self, timezone_frame): - - df = timezone_frame - idx = df["B"].rename("foo") - - # setitem - df["C"] = idx - tm.assert_series_equal(df["C"], Series(idx, name="C")) - - df["D"] = "foo" - df["D"] = idx - tm.assert_series_equal(df["D"], Series(idx, name="D")) - del df["D"] - - # assert that A & C are not sharing the same base (e.g. 
they - # are copies) - b1 = df._mgr.blocks[1] - b2 = df._mgr.blocks[2] - tm.assert_extension_array_equal(b1.values, b2.values) - b1base = b1.values._data.base - b2base = b2.values._data.base - assert b1base is None or (id(b1base) != id(b2base)) - - # with nan - df2 = df.copy() - df2.iloc[1, 1] = pd.NaT - df2.iloc[1, 2] = pd.NaT - result = df2["B"] - tm.assert_series_equal(notna(result), Series([True, False, True], name="B")) - tm.assert_series_equal(df2.dtypes, df.dtypes) - - def test_set_reset(self): - - idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo") - - # set/reset - df = DataFrame({"A": [0, 1, 2]}, index=idx) - result = df.reset_index() - assert result["foo"].dtype == "datetime64[ns, US/Eastern]" - - df = result.set_index("foo") - tm.assert_index_equal(df.index, idx) diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 1d5b0936103ee..752cea5cf757c 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1327,81 +1327,6 @@ def test_getitem_list_duplicates(self): expected = df.iloc[:, 2:] tm.assert_frame_equal(result, expected) - def test_reindex_with_multi_index(self): - # https://github.com/pandas-dev/pandas/issues/29896 - # tests for reindexing a multi-indexed DataFrame with a new MultiIndex - # - # confirms that we can reindex a multi-indexed DataFrame with a new - # MultiIndex object correctly when using no filling, backfilling, and - # padding - # - # The DataFrame, `df`, used in this test is: - # c - # a b - # -1 0 A - # 1 B - # 2 C - # 3 D - # 4 E - # 5 F - # 6 G - # 0 0 A - # 1 B - # 2 C - # 3 D - # 4 E - # 5 F - # 6 G - # 1 0 A - # 1 B - # 2 C - # 3 D - # 4 E - # 5 F - # 6 G - # - # and the other MultiIndex, `new_multi_index`, is: - # 0: 0 0.5 - # 1: 2.0 - # 2: 5.0 - # 3: 5.8 - df = DataFrame( - { - "a": [-1] * 7 + [0] * 7 + [1] * 7, - "b": list(range(7)) * 3, - "c": ["A", "B", "C", "D", "E", "F", "G"] * 3, - } - ).set_index(["a", "b"]) - new_index = [0.5, 2.0, 5.0, 5.8] - new_multi_index = MultiIndex.from_product([[0], new_index], names=["a", "b"]) - - # reindexing w/o a `method` value - reindexed = df.reindex(new_multi_index) - expected = DataFrame( - {"a": [0] * 4, "b": new_index, "c": [np.nan, "C", "F", np.nan]} - ).set_index(["a", "b"]) - tm.assert_frame_equal(expected, reindexed) - - # reindexing with backfilling - expected = DataFrame( - {"a": [0] * 4, "b": new_index, "c": ["B", "C", "F", "G"]} - ).set_index(["a", "b"]) - reindexed_with_backfilling = df.reindex(new_multi_index, method="bfill") - tm.assert_frame_equal(expected, reindexed_with_backfilling) - - reindexed_with_backfilling = df.reindex(new_multi_index, method="backfill") - tm.assert_frame_equal(expected, reindexed_with_backfilling) - - # reindexing with padding - expected = DataFrame( - {"a": [0] * 4, "b": new_index, "c": ["A", "C", "F", "F"]} - ).set_index(["a", "b"]) - reindexed_with_padding = df.reindex(new_multi_index, method="pad") - tm.assert_frame_equal(expected, reindexed_with_padding) - - reindexed_with_padding = df.reindex(new_multi_index, method="ffill") - tm.assert_frame_equal(expected, reindexed_with_padding) - def test_set_value_with_index_dtype_change(self): df_orig = DataFrame(np.random.randn(3, 3), index=range(3), columns=list("ABC")) @@ -1553,216 +1478,11 @@ def test_loc_duplicates(self): df.loc[trange[bool_idx], "A"] += 6 tm.assert_frame_equal(df, expected) - @pytest.mark.parametrize( - "method,expected_values", - [ - ("nearest", [0, 1, 1, 2]), - 
("pad", [np.nan, 0, 1, 1]), - ("backfill", [0, 1, 2, 2]), - ], - ) - def test_reindex_methods(self, method, expected_values): - df = DataFrame({"x": list(range(5))}) - target = np.array([-0.1, 0.9, 1.1, 1.5]) - - expected = DataFrame({"x": expected_values}, index=target) - actual = df.reindex(target, method=method) - tm.assert_frame_equal(expected, actual) - - actual = df.reindex(target, method=method, tolerance=1) - tm.assert_frame_equal(expected, actual) - actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1]) - tm.assert_frame_equal(expected, actual) - - e2 = expected[::-1] - actual = df.reindex(target[::-1], method=method) - tm.assert_frame_equal(e2, actual) - - new_order = [3, 0, 2, 1] - e2 = expected.iloc[new_order] - actual = df.reindex(target[new_order], method=method) - tm.assert_frame_equal(e2, actual) - - switched_method = ( - "pad" if method == "backfill" else "backfill" if method == "pad" else method - ) - actual = df[::-1].reindex(target, method=switched_method) - tm.assert_frame_equal(expected, actual) - - def test_reindex_methods_nearest_special(self): - df = DataFrame({"x": list(range(5))}) - target = np.array([-0.1, 0.9, 1.1, 1.5]) - - expected = DataFrame({"x": [0, 1, 1, np.nan]}, index=target) - actual = df.reindex(target, method="nearest", tolerance=0.2) - tm.assert_frame_equal(expected, actual) - - expected = DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target) - actual = df.reindex(target, method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1]) - tm.assert_frame_equal(expected, actual) - - def test_reindex_nearest_tz(self, tz_aware_fixture): - # GH26683 - tz = tz_aware_fixture - idx = pd.date_range("2019-01-01", periods=5, tz=tz) - df = DataFrame({"x": list(range(5))}, index=idx) - - expected = df.head(3) - actual = df.reindex(idx[:3], method="nearest") - tm.assert_frame_equal(expected, actual) - - def test_reindex_nearest_tz_empty_frame(self): - # https://github.com/pandas-dev/pandas/issues/31964 - dti = pd.DatetimeIndex(["2016-06-26 14:27:26+00:00"]) - df = DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"])) - expected = DataFrame(index=dti) - result = df.reindex(dti, method="nearest") - tm.assert_frame_equal(result, expected) - - def test_reindex_frame_add_nat(self): - rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s") - df = DataFrame({"A": np.random.randn(len(rng)), "B": rng}) - - result = df.reindex(range(15)) - assert np.issubdtype(result["B"].dtype, np.dtype("M8[ns]")) - - mask = com.isna(result)["B"] - assert mask[-5:].all() - assert not mask[:-5].any() - - def test_reindex_limit(self): - # GH 28631 - data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]] - exp_data = [ - ["A", "A", "A"], - ["B", "B", "B"], - ["C", "C", "C"], - ["D", "D", "D"], - ["D", "D", "D"], - [np.nan, np.nan, np.nan], - ] - df = DataFrame(data) - result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1) - expected = DataFrame(exp_data) - tm.assert_frame_equal(result, expected) - def test_set_dataframe_column_ns_dtype(self): x = DataFrame([datetime.now(), datetime.now()]) assert x[0].dtype == np.dtype("M8[ns]") - def test_non_monotonic_reindex_methods(self): - dr = pd.date_range("2013-08-01", periods=6, freq="B") - data = np.random.randn(6, 1) - df = DataFrame(data, index=dr, columns=list("A")) - df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A")) - # index is not monotonic increasing or decreasing - msg = "index must be monotonic increasing or decreasing" - with pytest.raises(ValueError, match=msg): - 
df_rev.reindex(df.index, method="pad") - with pytest.raises(ValueError, match=msg): - df_rev.reindex(df.index, method="ffill") - with pytest.raises(ValueError, match=msg): - df_rev.reindex(df.index, method="bfill") - with pytest.raises(ValueError, match=msg): - df_rev.reindex(df.index, method="nearest") - - def test_reindex_level(self): - from itertools import permutations - - icol = ["jim", "joe", "jolie"] - - def verify_first_level(df, level, idx, check_index_type=True): - def f(val): - return np.nonzero((df[level] == val).to_numpy())[0] - - i = np.concatenate(list(map(f, idx))) - left = df.set_index(icol).reindex(idx, level=level) - right = df.iloc[i].set_index(icol) - tm.assert_frame_equal(left, right, check_index_type=check_index_type) - - def verify(df, level, idx, indexer, check_index_type=True): - left = df.set_index(icol).reindex(idx, level=level) - right = df.iloc[indexer].set_index(icol) - tm.assert_frame_equal(left, right, check_index_type=check_index_type) - - df = DataFrame( - { - "jim": list("B" * 4 + "A" * 2 + "C" * 3), - "joe": list("abcdeabcd")[::-1], - "jolie": [10, 20, 30] * 3, - "joline": np.random.randint(0, 1000, 9), - } - ) - - target = [ - ["C", "B", "A"], - ["F", "C", "A", "D"], - ["A"], - ["A", "B", "C"], - ["C", "A", "B"], - ["C", "B"], - ["C", "A"], - ["A", "B"], - ["B", "A", "C"], - ] - - for idx in target: - verify_first_level(df, "jim", idx) - - # reindex by these causes different MultiIndex levels - for idx in [["D", "F"], ["A", "C", "B"]]: - verify_first_level(df, "jim", idx, check_index_type=False) - - verify(df, "joe", list("abcde"), [3, 2, 1, 0, 5, 4, 8, 7, 6]) - verify(df, "joe", list("abcd"), [3, 2, 1, 0, 5, 8, 7, 6]) - verify(df, "joe", list("abc"), [3, 2, 1, 8, 7, 6]) - verify(df, "joe", list("eca"), [1, 3, 4, 6, 8]) - verify(df, "joe", list("edc"), [0, 1, 4, 5, 6]) - verify(df, "joe", list("eadbc"), [3, 0, 2, 1, 4, 5, 8, 7, 6]) - verify(df, "joe", list("edwq"), [0, 4, 5]) - verify(df, "joe", list("wq"), [], check_index_type=False) - - df = DataFrame( - { - "jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7, - "joe": ["3rd"] * 2 - + ["1st"] * 3 - + ["2nd"] * 3 - + ["1st"] * 2 - + ["3rd"] * 3 - + ["1st"] * 2 - + ["3rd"] * 3 - + ["2nd"] * 2, - # this needs to be jointly unique with jim and joe or - # reindexing will fail ~1.5% of the time, this works - # out to needing unique groups of same size as joe - "jolie": np.concatenate( - [ - np.random.choice(1000, x, replace=False) - for x in [2, 3, 3, 2, 3, 2, 3, 2] - ] - ), - "joline": np.random.randn(20).round(3) * 10, - } - ) - - for idx in permutations(df["jim"].unique()): - for i in range(3): - verify_first_level(df, "jim", idx[: i + 1]) - - i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10, 11, 12, 13, 14, 18, 19, 15, 16, 17] - verify(df, "joe", ["1st", "2nd", "3rd"], i) - - i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 13, 14] - verify(df, "joe", ["3rd", "2nd", "1st"], i) - - i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17] - verify(df, "joe", ["2nd", "3rd"], i) - - i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14] - verify(df, "joe", ["3rd", "1st"], i) - - def test_getitem_ix_float_duplicates(self): + def test_iloc_getitem_float_duplicates(self): df = DataFrame( np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc") ) @@ -1808,7 +1528,7 @@ def test_setitem_with_unaligned_tz_aware_datetime_column(self): df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]] tm.assert_series_equal(df["dates"], column) - def test_setitem_datetime_coercion(self): + def 
test_loc_setitem_datetime_coercion(self): # gh-1048 df = DataFrame({"c": [pd.Timestamp("2010-10-01")] * 3}) df.loc[0:1, "c"] = np.datetime64("2008-08-08") @@ -1817,7 +1537,7 @@ def test_setitem_datetime_coercion(self): df.loc[2, "c"] = date(2005, 5, 5) assert pd.Timestamp("2005-05-05") == df.loc[2, "c"] - def test_setitem_datetimelike_with_inference(self): + def test_loc_setitem_datetimelike_with_inference(self): # GH 7592 # assignment of timedeltas with NaT @@ -1840,7 +1560,7 @@ def test_setitem_datetimelike_with_inference(self): tm.assert_series_equal(result, expected) @pytest.mark.parametrize("idxer", ["var", ["var"]]) - def test_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture): + def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture): # GH 11365 tz = tz_naive_fixture idx = date_range(start="2015-07-12", periods=3, freq="H", tz=tz) @@ -1895,7 +1615,7 @@ def test_at_time_between_time_datetimeindex(self): result.loc[bkey] = df.iloc[binds] tm.assert_frame_equal(result, df) - def test_index_namedtuple(self): + def test_loc_getitem_index_namedtuple(self): from collections import namedtuple IndexType = namedtuple("IndexType", ["a", "b"]) @@ -1908,7 +1628,7 @@ def test_index_namedtuple(self): assert result == 1 @pytest.mark.parametrize("tpl", [tuple([1]), tuple([1, 2])]) - def test_index_single_double_tuples(self, tpl): + def test_loc_getitem_index_single_double_tuples(self, tpl): # GH 20991 idx = pd.Index([tuple([1]), tuple([1, 2])], name="A", tupleize_cols=False) df = DataFrame(index=idx) @@ -1918,7 +1638,7 @@ def test_index_single_double_tuples(self, tpl): expected = DataFrame(index=idx) tm.assert_frame_equal(result, expected) - def test_boolean_indexing(self): + def test_setitem_boolean_indexing(self): idx = list(range(3)) cols = ["A", "B", "C"] df1 = DataFrame( @@ -1941,7 +1661,7 @@ def test_boolean_indexing(self): with pytest.raises(ValueError, match="Item wrong length"): df1[df1.index[:-1] > 2] = -1 - def test_boolean_indexing_mixed(self): + def test_getitem_boolean_indexing_mixed(self): df = DataFrame( { 0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan}, @@ -2014,7 +1734,7 @@ def test_type_error_multiindex(self): result = dg["x", 0] tm.assert_series_equal(result, expected) - def test_interval_index(self): + def test_loc_getitem_interval_index(self): # GH 19977 index = pd.interval_range(start=0, periods=3) df = DataFrame( @@ -2080,18 +1800,6 @@ def test_setitem(self, uint64_frame): ), ) - def test_set_reset(self): - - idx = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10], name="foo") - - # set/reset - df = DataFrame({"A": [0, 1, 2]}, index=idx) - result = df.reset_index() - assert result["foo"].dtype == np.dtype("uint64") - - df = result.set_index("foo") - tm.assert_index_equal(df.index, idx) - def test_object_casting_indexing_wraps_datetimelike(): # GH#31649, check the indexing methods all the way down the stack diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 8313ab0b99bac..87c6ae09aac11 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -8,10 +8,12 @@ DataFrame, Index, Interval, + NaT, Period, Series, Timestamp, date_range, + notna, ) import pandas._testing as tm from pandas.core.arrays import SparseArray @@ -180,3 +182,34 @@ def test_setitem_extension_types(self, obj, dtype): df["obj"] = obj tm.assert_frame_equal(df, expected) + + def test_setitem_dt64tz(self, timezone_frame): + + df = timezone_frame + idx = df["B"].rename("foo") + + # 
setitem + df["C"] = idx + tm.assert_series_equal(df["C"], Series(idx, name="C")) + + df["D"] = "foo" + df["D"] = idx + tm.assert_series_equal(df["D"], Series(idx, name="D")) + del df["D"] + + # assert that A & C are not sharing the same base (e.g. they + # are copies) + b1 = df._mgr.blocks[1] + b2 = df._mgr.blocks[2] + tm.assert_extension_array_equal(b1.values, b2.values) + b1base = b1.values._data.base + b2base = b2.values._data.base + assert b1base is None or (id(b1base) != id(b2base)) + + # with nan + df2 = df.copy() + df2.iloc[1, 1] = NaT + df2.iloc[1, 2] = NaT + result = df2["B"] + tm.assert_series_equal(notna(result), Series([True, False, True], name="B")) + tm.assert_series_equal(df2.dtypes, df.dtypes) diff --git a/pandas/tests/frame/indexing/test_sparse.py b/pandas/tests/frame/indexing/test_sparse.py index 04e1c8b94c4d9..c0cd7faafb4db 100644 --- a/pandas/tests/frame/indexing/test_sparse.py +++ b/pandas/tests/frame/indexing/test_sparse.py @@ -27,7 +27,7 @@ def test_getitem_sparse_column(self): @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"]) @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex]) @td.skip_if_no_scipy - def test_locindexer_from_spmatrix(self, spmatrix_t, dtype): + def test_loc_getitem_from_spmatrix(self, spmatrix_t, dtype): import scipy.sparse spmatrix_t = getattr(scipy.sparse, spmatrix_t) @@ -50,21 +50,6 @@ def test_locindexer_from_spmatrix(self, spmatrix_t, dtype): expected = np.full(cols, SparseDtype(dtype, fill_value=0)) tm.assert_numpy_array_equal(result, expected) - def test_reindex(self): - # https://github.com/pandas-dev/pandas/issues/35286 - df = pd.DataFrame( - {"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))} - ) - result = df.reindex([0, 2]) - expected = pd.DataFrame( - { - "A": [0.0, np.nan], - "B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)), - }, - index=[0, 2], - ) - tm.assert_frame_equal(result, expected) - def test_all_sparse(self): df = pd.DataFrame({"A": pd.array([0, 0], dtype=pd.SparseDtype("int64"))}) result = df.loc[[0, 1]] diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 95209c0c35195..3495247585236 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -642,3 +642,14 @@ def test_df_where_with_category(self, kwargs): expected = Series(A, name="A") tm.assert_series_equal(result, expected) + + def test_where_categorical_filtering(self): + # GH#22609 Verify filtering operations on DataFrames with categorical Series + df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) + df["b"] = df["b"].astype("category") + + result = df.where(df["a"] > 0) + expected = df.copy() + expected.loc[0, :] = np.nan + + tm.assert_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 99494191c043a..5a5aac87b057d 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -1,17 +1,312 @@ from datetime import datetime +from itertools import permutations import numpy as np import pytest import pandas as pd -from pandas import Categorical, DataFrame, Index, Series, date_range, isna +from pandas import Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna import pandas._testing as tm +import pandas.core.common as com class TestDataFrameSelectReindex: # These are specific reindex-based tests; other indexing tests should go in # test_indexing + def 
test_reindex_with_multi_index(self): + # https://github.com/pandas-dev/pandas/issues/29896 + # tests for reindexing a multi-indexed DataFrame with a new MultiIndex + # + # confirms that we can reindex a multi-indexed DataFrame with a new + # MultiIndex object correctly when using no filling, backfilling, and + # padding + # + # The DataFrame, `df`, used in this test is: + # c + # a b + # -1 0 A + # 1 B + # 2 C + # 3 D + # 4 E + # 5 F + # 6 G + # 0 0 A + # 1 B + # 2 C + # 3 D + # 4 E + # 5 F + # 6 G + # 1 0 A + # 1 B + # 2 C + # 3 D + # 4 E + # 5 F + # 6 G + # + # and the other MultiIndex, `new_multi_index`, is: + # 0: 0 0.5 + # 1: 2.0 + # 2: 5.0 + # 3: 5.8 + df = DataFrame( + { + "a": [-1] * 7 + [0] * 7 + [1] * 7, + "b": list(range(7)) * 3, + "c": ["A", "B", "C", "D", "E", "F", "G"] * 3, + } + ).set_index(["a", "b"]) + new_index = [0.5, 2.0, 5.0, 5.8] + new_multi_index = MultiIndex.from_product([[0], new_index], names=["a", "b"]) + + # reindexing w/o a `method` value + reindexed = df.reindex(new_multi_index) + expected = DataFrame( + {"a": [0] * 4, "b": new_index, "c": [np.nan, "C", "F", np.nan]} + ).set_index(["a", "b"]) + tm.assert_frame_equal(expected, reindexed) + + # reindexing with backfilling + expected = DataFrame( + {"a": [0] * 4, "b": new_index, "c": ["B", "C", "F", "G"]} + ).set_index(["a", "b"]) + reindexed_with_backfilling = df.reindex(new_multi_index, method="bfill") + tm.assert_frame_equal(expected, reindexed_with_backfilling) + + reindexed_with_backfilling = df.reindex(new_multi_index, method="backfill") + tm.assert_frame_equal(expected, reindexed_with_backfilling) + + # reindexing with padding + expected = DataFrame( + {"a": [0] * 4, "b": new_index, "c": ["A", "C", "F", "F"]} + ).set_index(["a", "b"]) + reindexed_with_padding = df.reindex(new_multi_index, method="pad") + tm.assert_frame_equal(expected, reindexed_with_padding) + + reindexed_with_padding = df.reindex(new_multi_index, method="ffill") + tm.assert_frame_equal(expected, reindexed_with_padding) + + @pytest.mark.parametrize( + "method,expected_values", + [ + ("nearest", [0, 1, 1, 2]), + ("pad", [np.nan, 0, 1, 1]), + ("backfill", [0, 1, 2, 2]), + ], + ) + def test_reindex_methods(self, method, expected_values): + df = DataFrame({"x": list(range(5))}) + target = np.array([-0.1, 0.9, 1.1, 1.5]) + + expected = DataFrame({"x": expected_values}, index=target) + actual = df.reindex(target, method=method) + tm.assert_frame_equal(expected, actual) + + actual = df.reindex(target, method=method, tolerance=1) + tm.assert_frame_equal(expected, actual) + actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1]) + tm.assert_frame_equal(expected, actual) + + e2 = expected[::-1] + actual = df.reindex(target[::-1], method=method) + tm.assert_frame_equal(e2, actual) + + new_order = [3, 0, 2, 1] + e2 = expected.iloc[new_order] + actual = df.reindex(target[new_order], method=method) + tm.assert_frame_equal(e2, actual) + + switched_method = ( + "pad" if method == "backfill" else "backfill" if method == "pad" else method + ) + actual = df[::-1].reindex(target, method=switched_method) + tm.assert_frame_equal(expected, actual) + + def test_reindex_methods_nearest_special(self): + df = DataFrame({"x": list(range(5))}) + target = np.array([-0.1, 0.9, 1.1, 1.5]) + + expected = DataFrame({"x": [0, 1, 1, np.nan]}, index=target) + actual = df.reindex(target, method="nearest", tolerance=0.2) + tm.assert_frame_equal(expected, actual) + + expected = DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target) + actual = df.reindex(target, 
method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1]) + tm.assert_frame_equal(expected, actual) + + def test_reindex_nearest_tz(self, tz_aware_fixture): + # GH26683 + tz = tz_aware_fixture + idx = pd.date_range("2019-01-01", periods=5, tz=tz) + df = DataFrame({"x": list(range(5))}, index=idx) + + expected = df.head(3) + actual = df.reindex(idx[:3], method="nearest") + tm.assert_frame_equal(expected, actual) + + def test_reindex_nearest_tz_empty_frame(self): + # https://github.com/pandas-dev/pandas/issues/31964 + dti = pd.DatetimeIndex(["2016-06-26 14:27:26+00:00"]) + df = DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"])) + expected = DataFrame(index=dti) + result = df.reindex(dti, method="nearest") + tm.assert_frame_equal(result, expected) + + def test_reindex_frame_add_nat(self): + rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s") + df = DataFrame({"A": np.random.randn(len(rng)), "B": rng}) + + result = df.reindex(range(15)) + assert np.issubdtype(result["B"].dtype, np.dtype("M8[ns]")) + + mask = com.isna(result)["B"] + assert mask[-5:].all() + assert not mask[:-5].any() + + def test_reindex_limit(self): + # GH 28631 + data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]] + exp_data = [ + ["A", "A", "A"], + ["B", "B", "B"], + ["C", "C", "C"], + ["D", "D", "D"], + ["D", "D", "D"], + [np.nan, np.nan, np.nan], + ] + df = DataFrame(data) + result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1) + expected = DataFrame(exp_data) + tm.assert_frame_equal(result, expected) + + def test_reindex_level(self): + icol = ["jim", "joe", "jolie"] + + def verify_first_level(df, level, idx, check_index_type=True): + def f(val): + return np.nonzero((df[level] == val).to_numpy())[0] + + i = np.concatenate(list(map(f, idx))) + left = df.set_index(icol).reindex(idx, level=level) + right = df.iloc[i].set_index(icol) + tm.assert_frame_equal(left, right, check_index_type=check_index_type) + + def verify(df, level, idx, indexer, check_index_type=True): + left = df.set_index(icol).reindex(idx, level=level) + right = df.iloc[indexer].set_index(icol) + tm.assert_frame_equal(left, right, check_index_type=check_index_type) + + df = DataFrame( + { + "jim": list("B" * 4 + "A" * 2 + "C" * 3), + "joe": list("abcdeabcd")[::-1], + "jolie": [10, 20, 30] * 3, + "joline": np.random.randint(0, 1000, 9), + } + ) + + target = [ + ["C", "B", "A"], + ["F", "C", "A", "D"], + ["A"], + ["A", "B", "C"], + ["C", "A", "B"], + ["C", "B"], + ["C", "A"], + ["A", "B"], + ["B", "A", "C"], + ] + + for idx in target: + verify_first_level(df, "jim", idx) + + # reindex by these causes different MultiIndex levels + for idx in [["D", "F"], ["A", "C", "B"]]: + verify_first_level(df, "jim", idx, check_index_type=False) + + verify(df, "joe", list("abcde"), [3, 2, 1, 0, 5, 4, 8, 7, 6]) + verify(df, "joe", list("abcd"), [3, 2, 1, 0, 5, 8, 7, 6]) + verify(df, "joe", list("abc"), [3, 2, 1, 8, 7, 6]) + verify(df, "joe", list("eca"), [1, 3, 4, 6, 8]) + verify(df, "joe", list("edc"), [0, 1, 4, 5, 6]) + verify(df, "joe", list("eadbc"), [3, 0, 2, 1, 4, 5, 8, 7, 6]) + verify(df, "joe", list("edwq"), [0, 4, 5]) + verify(df, "joe", list("wq"), [], check_index_type=False) + + df = DataFrame( + { + "jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7, + "joe": ["3rd"] * 2 + + ["1st"] * 3 + + ["2nd"] * 3 + + ["1st"] * 2 + + ["3rd"] * 3 + + ["1st"] * 2 + + ["3rd"] * 3 + + ["2nd"] * 2, + # this needs to be jointly unique with jim and joe or + # reindexing will fail ~1.5% of the time, this works + # out to needing unique 
groups of same size as joe + "jolie": np.concatenate( + [ + np.random.choice(1000, x, replace=False) + for x in [2, 3, 3, 2, 3, 2, 3, 2] + ] + ), + "joline": np.random.randn(20).round(3) * 10, + } + ) + + for idx in permutations(df["jim"].unique()): + for i in range(3): + verify_first_level(df, "jim", idx[: i + 1]) + + i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10, 11, 12, 13, 14, 18, 19, 15, 16, 17] + verify(df, "joe", ["1st", "2nd", "3rd"], i) + + i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 13, 14] + verify(df, "joe", ["3rd", "2nd", "1st"], i) + + i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17] + verify(df, "joe", ["2nd", "3rd"], i) + + i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14] + verify(df, "joe", ["3rd", "1st"], i) + + def test_non_monotonic_reindex_methods(self): + dr = date_range("2013-08-01", periods=6, freq="B") + data = np.random.randn(6, 1) + df = DataFrame(data, index=dr, columns=list("A")) + df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A")) + # index is not monotonic increasing or decreasing + msg = "index must be monotonic increasing or decreasing" + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="pad") + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="ffill") + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="bfill") + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="nearest") + + def test_reindex_sparse(self): + # https://github.com/pandas-dev/pandas/issues/35286 + df = DataFrame( + {"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))} + ) + result = df.reindex([0, 2]) + expected = DataFrame( + { + "A": [0.0, np.nan], + "B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)), + }, + index=[0, 2], + ) + tm.assert_frame_equal(result, expected) + def test_reindex(self, float_frame): datetime_series = tm.makeTimeSeries(nper=30) diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index b88ef0e6691cb..3c14192f02cf6 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -18,6 +18,30 @@ class TestResetIndex: + def test_set_reset(self): + + idx = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10], name="foo") + + # set/reset + df = DataFrame({"A": [0, 1, 2]}, index=idx) + result = df.reset_index() + assert result["foo"].dtype == np.dtype("uint64") + + df = result.set_index("foo") + tm.assert_index_equal(df.index, idx) + + def test_set_index_reset_index_dt64tz(self): + + idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo") + + # set/reset + df = DataFrame({"A": [0, 1, 2]}, index=idx) + result = df.reset_index() + assert result["foo"].dtype == "datetime64[ns, US/Eastern]" + + df = result.set_index("foo") + tm.assert_index_equal(df.index, idx) + def test_reset_index_tz(self, tz_aware_fixture): # GH 3950 # reset_index with single level diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py index 9cf901c0797d8..c720547aab3f8 100644 --- a/pandas/tests/indexes/categorical/test_indexing.py +++ b/pandas/tests/indexes/categorical/test_indexing.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import CategoricalIndex, Index, IntervalIndex +from pandas import CategoricalIndex, Index, IntervalIndex, Timestamp import pandas._testing as tm @@ -251,6 +251,32 @@ def test_get_indexer(self): 
with pytest.raises(NotImplementedError, match=msg): idx2.get_indexer(idx1, method="nearest") + def test_get_indexer_array(self): + arr = np.array( + [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")], + dtype=object, + ) + cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")] + ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category") + result = ci.get_indexer(arr) + expected = np.array([0, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_same_order(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19551 + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + class TestWhere: @pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series]) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 347ce2262a261..9b52c297ec688 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -435,32 +435,6 @@ def test_loc_listlike_dtypes(self): with pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] - def test_get_indexer_array(self): - arr = np.array( - [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")], - dtype=object, - ) - cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")] - ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category") - result = ci.get_indexer(arr) - expected = np.array([0, 1], dtype="intp") - tm.assert_numpy_array_equal(result, expected) - - def test_get_indexer_same_categories_same_order(self): - ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) - - result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"])) - expected = np.array([1, 1], dtype="intp") - tm.assert_numpy_array_equal(result, expected) - - def test_get_indexer_same_categories_different_order(self): - # https://github.com/pandas-dev/pandas/issues/19551 - ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) - - result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"])) - expected = np.array([1, 1], dtype="intp") - tm.assert_numpy_array_equal(result, expected) - def test_getitem_with_listlike(self): # GH 16115 cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")]) diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py index e6091a63b3e97..8aa4012b3e77c 100644 --- a/pandas/tests/reshape/test_cut.py +++ b/pandas/tests/reshape/test_cut.py @@ -674,3 +674,10 @@ def test_cut_unordered_with_series_labels(): result = pd.cut(s, bins=bins, labels=labels, ordered=False) expected = Series(["a", "a", "b", "b", "c"], dtype="category") tm.assert_series_equal(result, expected) + + +def test_cut_no_warnings(): + df = DataFrame({"value": np.random.randint(0, 100, 20)}) + labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)] + with tm.assert_produces_warning(False): + df["group"] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
I've been saving the indexing tests for last because they are some of the hardest to organize consistently. This PR makes a start on that.
https://api.github.com/repos/pandas-dev/pandas/pulls/37319
2020-10-21T17:59:06Z
2020-10-22T23:33:10Z
2020-10-22T23:33:10Z
2020-10-22T23:49:11Z
TST/REF: rename test files
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/methods/test_dropna.py similarity index 100% rename from pandas/tests/frame/test_missing.py rename to pandas/tests/frame/methods/test_dropna.py diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/methods/test_reindex.py similarity index 100% rename from pandas/tests/series/indexing/test_alter_index.py rename to pandas/tests/series/methods/test_reindex.py
https://api.github.com/repos/pandas-dev/pandas/pulls/37317
2020-10-21T17:41:45Z
2020-10-22T00:03:32Z
2020-10-22T00:03:32Z
2020-10-22T01:18:51Z
TST/REF: collect tests by method
diff --git a/pandas/tests/frame/methods/test_values.py b/pandas/tests/frame/methods/test_values.py index cd6c5da8dd3a0..ddfe60773aa8f 100644 --- a/pandas/tests/frame/methods/test_values.py +++ b/pandas/tests/frame/methods/test_values.py @@ -1,6 +1,6 @@ import numpy as np -from pandas import DataFrame, Timestamp, date_range +from pandas import DataFrame, NaT, Timestamp, date_range import pandas._testing as tm @@ -51,3 +51,54 @@ def test_frame_values_with_tz(self): expected = np.concatenate([expected, new], axis=1) result = df.values tm.assert_numpy_array_equal(result, expected) + + def test_interleave_with_tzaware(self, timezone_frame): + + # interleave with object + result = timezone_frame.assign(D="foo").values + expected = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ["foo", "foo", "foo"], + ], + dtype=object, + ).T + tm.assert_numpy_array_equal(result, expected) + + # interleave with only datetime64[ns] + result = timezone_frame.values + expected = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ], + dtype=object, + ).T + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 3e7bdee414c69..1add4c0db2e53 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -6,7 +6,7 @@ from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd -from pandas import DataFrame, Series, Timestamp, date_range, option_context +from pandas import DataFrame, Series, date_range, option_context import pandas._testing as tm @@ -18,22 +18,6 @@ def _check_cast(df, v): class TestDataFrameDataTypes: - def test_concat_empty_dataframe_dtypes(self): - df = DataFrame(columns=list("abc")) - df["a"] = df["a"].astype(np.bool_) - df["b"] = df["b"].astype(np.int32) - df["c"] = df["c"].astype(np.float64) - - result = pd.concat([df, df]) - assert result["a"].dtype == np.bool_ - assert result["b"].dtype == np.int32 - assert result["c"].dtype == np.float64 - - result = pd.concat([df, df.astype(np.float64)]) - assert result["a"].dtype == np.object_ - assert result["b"].dtype == np.float64 - assert result["c"].dtype == np.float64 - def test_empty_frame_dtypes(self): empty_df = DataFrame() tm.assert_series_equal(empty_df.dtypes, Series(dtype=object)) @@ -244,56 +228,3 @@ def test_str_to_small_float_conversion_type(self): result.loc[result.index, "A"] = [float(x) for x in col_data] expected = DataFrame(col_data, columns=["A"], dtype=float) tm.assert_frame_equal(result, expected) - - -class TestDataFrameDatetimeWithTZ: - def test_interleave(self, timezone_frame): - - # interleave with object - result = timezone_frame.assign(D="foo").values - expected = np.array( - [ - [ - Timestamp("2013-01-01 00:00:00"), - Timestamp("2013-01-02 00:00:00"), - Timestamp("2013-01-03 00:00:00"), - ], - [ - Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), - 
pd.NaT, - Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), - ], - [ - Timestamp("2013-01-01 00:00:00+0100", tz="CET"), - pd.NaT, - Timestamp("2013-01-03 00:00:00+0100", tz="CET"), - ], - ["foo", "foo", "foo"], - ], - dtype=object, - ).T - tm.assert_numpy_array_equal(result, expected) - - # interleave with only datetime64[ns] - result = timezone_frame.values - expected = np.array( - [ - [ - Timestamp("2013-01-01 00:00:00"), - Timestamp("2013-01-02 00:00:00"), - Timestamp("2013-01-03 00:00:00"), - ], - [ - Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), - pd.NaT, - Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), - ], - [ - Timestamp("2013-01-01 00:00:00+0100", tz="CET"), - pd.NaT, - Timestamp("2013-01-03 00:00:00+0100", tz="CET"), - ], - ], - dtype=object, - ).T - tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index a5b862adc8768..da7435b9609b2 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2604,6 +2604,22 @@ def test_concat_odered_dict(self): ) tm.assert_series_equal(result, expected) + def test_concat_empty_dataframe_dtypes(self): + df = DataFrame(columns=list("abc")) + df["a"] = df["a"].astype(np.bool_) + df["b"] = df["b"].astype(np.int32) + df["c"] = df["c"].astype(np.float64) + + result = pd.concat([df, df]) + assert result["a"].dtype == np.bool_ + assert result["b"].dtype == np.int32 + assert result["c"].dtype == np.float64 + + result = pd.concat([df, df.astype(np.float64)]) + assert result["a"].dtype == np.object_ + assert result["b"].dtype == np.float64 + assert result["c"].dtype == np.float64 + @pytest.mark.parametrize("pdt", [Series, pd.DataFrame]) @pytest.mark.parametrize("dt", np.sctypes["float"]) diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 229d9de5eaad2..6ac1397fa7695 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -1,7 +1,9 @@ +from datetime import date + import numpy as np import pytest -from pandas import MultiIndex, NaT, Series, date_range, period_range +from pandas import MultiIndex, NaT, Series, Timestamp, date_range, period_range import pandas.testing as tm @@ -28,6 +30,26 @@ def test_setitem_multiindex_empty_slice(self): result.loc[[]] = 0 tm.assert_series_equal(result, expected) + def test_setitem_with_string_index(self): + # GH#23451 + ser = Series([1, 2, 3], index=["Date", "b", "other"]) + ser["Date"] = date.today() + assert ser.Date == date.today() + assert ser["Date"] == date.today() + + def test_setitem_with_different_tz_casts_to_object(self): + # GH#24024 + ser = Series(date_range("2000", periods=2, tz="US/Central")) + ser[0] = Timestamp("2000", tz="US/Eastern") + expected = Series( + [ + Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"), + Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"), + ], + dtype=object, + ) + tm.assert_series_equal(ser, expected) + class TestSetitemPeriodDtype: @pytest.mark.parametrize("na_val", [None, np.nan]) diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_dt_accessor.py similarity index 97% rename from pandas/tests/series/test_datetime_values.py rename to pandas/tests/series/test_dt_accessor.py index 53b465fa814b3..0e31ffcb30b93 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_dt_accessor.py @@ -655,26 +655,6 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): 
result = s.dt.timetz tm.assert_series_equal(result, expected) - def test_setitem_with_string_index(self): - # GH 23451 - x = Series([1, 2, 3], index=["Date", "b", "other"]) - x["Date"] = date.today() - assert x.Date == date.today() - assert x["Date"] == date.today() - - def test_setitem_with_different_tz(self): - # GH#24024 - ser = Series(pd.date_range("2000", periods=2, tz="US/Central")) - ser[0] = pd.Timestamp("2000", tz="US/Eastern") - expected = Series( - [ - pd.Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"), - pd.Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"), - ], - dtype=object, - ) - tm.assert_series_equal(ser, expected) - @pytest.mark.parametrize( "input_series, expected_output", [
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37315
2020-10-21T16:06:01Z
2020-10-22T23:31:51Z
2020-10-22T23:31:51Z
2020-10-22T23:49:49Z
TST/REF: collect stack/unstack tests
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_stack_unstack.py similarity index 66% rename from pandas/tests/frame/test_reshape.py rename to pandas/tests/frame/test_stack_unstack.py index 83a3b65d4b601..3db061cd6a31c 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1,4 +1,5 @@ from datetime import datetime +from io import StringIO import itertools import numpy as np @@ -10,95 +11,6 @@ class TestDataFrameReshape: - def test_pivot(self): - data = { - "index": ["A", "B", "C", "C", "B", "A"], - "columns": ["One", "One", "One", "Two", "Two", "Two"], - "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], - } - - frame = DataFrame(data) - pivoted = frame.pivot(index="index", columns="columns", values="values") - - expected = DataFrame( - { - "One": {"A": 1.0, "B": 2.0, "C": 3.0}, - "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, - } - ) - - expected.index.name, expected.columns.name = "index", "columns" - tm.assert_frame_equal(pivoted, expected) - - # name tracking - assert pivoted.index.name == "index" - assert pivoted.columns.name == "columns" - - # don't specify values - pivoted = frame.pivot(index="index", columns="columns") - assert pivoted.index.name == "index" - assert pivoted.columns.names == (None, "columns") - - def test_pivot_duplicates(self): - data = DataFrame( - { - "a": ["bar", "bar", "foo", "foo", "foo"], - "b": ["one", "two", "one", "one", "two"], - "c": [1.0, 2.0, 3.0, 3.0, 4.0], - } - ) - with pytest.raises(ValueError, match="duplicate entries"): - data.pivot("a", "b", "c") - - def test_pivot_empty(self): - df = DataFrame(columns=["a", "b", "c"]) - result = df.pivot("a", "b", "c") - expected = DataFrame() - tm.assert_frame_equal(result, expected, check_names=False) - - def test_pivot_integer_bug(self): - df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")]) - - result = df.pivot(index=1, columns=0, values=2) - repr(result) - tm.assert_index_equal(result.columns, Index(["A", "B"], name=0)) - - def test_pivot_index_none(self): - # gh-3962 - data = { - "index": ["A", "B", "C", "C", "B", "A"], - "columns": ["One", "One", "One", "Two", "Two", "Two"], - "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], - } - - frame = DataFrame(data).set_index("index") - result = frame.pivot(columns="columns", values="values") - expected = DataFrame( - { - "One": {"A": 1.0, "B": 2.0, "C": 3.0}, - "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, - } - ) - - expected.index.name, expected.columns.name = "index", "columns" - tm.assert_frame_equal(result, expected) - - # omit values - result = frame.pivot(columns="columns") - - expected.columns = pd.MultiIndex.from_tuples( - [("values", "One"), ("values", "Two")], names=[None, "columns"] - ) - expected.index.name = "index" - tm.assert_frame_equal(result, expected, check_names=False) - assert result.index.name == "index" - assert result.columns.names == (None, "columns") - expected.columns = expected.columns.droplevel(0) - result = frame.pivot(columns="columns", values="values") - - expected.columns.name = "columns" - tm.assert_frame_equal(result, expected) - def test_stack_unstack(self, float_frame): df = float_frame.copy() df[:] = np.arange(np.prod(df.shape)).reshape(df.shape) @@ -1309,3 +1221,640 @@ def test_stack_positional_level_duplicate_column_names(): expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) tm.assert_frame_equal(result, expected) + + +class TestStackUnstackMultiLevel: + def test_unstack(self, multiindex_year_month_day_dataframe_random_data): + # just check that it 
works for now + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack() + unstacked.unstack() + + # test that ints work + ymd.astype(int).unstack() + + # test that int32 work + ymd.astype(np.int32).unstack() + + @pytest.mark.parametrize( + "result_rows,result_columns,index_product,expected_row", + [ + ( + [[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]], + ["ix1", "ix2", "col1", "col2", "col3", "col4"], + 2, + [None, None, 30.0, None], + ), + ( + [[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]], + ["ix1", "ix2", "col1", "col2", "col3"], + 2, + [None, None, 30.0], + ), + ( + [[1, 1, None, None, 30.0], [2, None, None, None, 30.0]], + ["ix1", "ix2", "col1", "col2", "col3"], + None, + [None, None, 30.0], + ), + ], + ) + def test_unstack_partial( + self, result_rows, result_columns, index_product, expected_row + ): + # check for regressions on this issue: + # https://github.com/pandas-dev/pandas/issues/19351 + # make sure DataFrame.unstack() works when its run on a subset of the DataFrame + # and the Index levels contain values that are not present in the subset + result = DataFrame(result_rows, columns=result_columns).set_index( + ["ix1", "ix2"] + ) + result = result.iloc[1:2].unstack("ix2") + expected = DataFrame( + [expected_row], + columns=pd.MultiIndex.from_product( + [result_columns[2:], [index_product]], names=[None, "ix2"] + ), + index=pd.Index([2], name="ix1"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_multiple_no_empty_columns(self): + index = MultiIndex.from_tuples( + [(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)] + ) + + s = Series(np.random.randn(4), index=index) + + unstacked = s.unstack([1, 2]) + expected = unstacked.dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected) + + def test_stack(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + # regular roundtrip + unstacked = ymd.unstack() + restacked = unstacked.stack() + tm.assert_frame_equal(restacked, ymd) + + unlexsorted = ymd.sort_index(level=2) + + unstacked = unlexsorted.unstack(2) + restacked = unstacked.stack() + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + unlexsorted = unlexsorted[::-1] + unstacked = unlexsorted.unstack(1) + restacked = unstacked.stack().swaplevel(1, 2) + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + unlexsorted = unlexsorted.swaplevel(0, 1) + unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1) + restacked = unstacked.stack(0).swaplevel(1, 2) + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + # columns unsorted + unstacked = ymd.unstack() + unstacked = unstacked.sort_index(axis=1, ascending=False) + restacked = unstacked.stack() + tm.assert_frame_equal(restacked, ymd) + + # more than 2 levels in the columns + unstacked = ymd.unstack(1).unstack(1) + + result = unstacked.stack(1) + expected = ymd.unstack() + tm.assert_frame_equal(result, expected) + + result = unstacked.stack(2) + expected = ymd.unstack(1) + tm.assert_frame_equal(result, expected) + + result = unstacked.stack(0) + expected = ymd.stack().unstack(1).unstack(1) + tm.assert_frame_equal(result, expected) + + # not all levels present in each echelon + unstacked = ymd.unstack(2).loc[:, ::3] + stacked = unstacked.stack().stack() + ymd_stacked = ymd.stack() + tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index)) + + # stack with negative number + result = ymd.unstack(0).stack(-2) + expected = ymd.unstack(0).stack(0) + 
+ # GH10417 + def check(left, right): + tm.assert_series_equal(left, right) + assert left.index.is_unique is False + li, ri = left.index, right.index + tm.assert_index_equal(li, ri) + + df = DataFrame( + np.arange(12).reshape(4, 3), + index=list("abab"), + columns=["1st", "2nd", "3rd"], + ) + + mi = MultiIndex( + levels=[["a", "b"], ["1st", "2nd", "3rd"]], + codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)], + ) + + left, right = df.stack(), Series(np.arange(12), index=mi) + check(left, right) + + df.columns = ["1st", "2nd", "1st"] + mi = MultiIndex( + levels=[["a", "b"], ["1st", "2nd"]], + codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)], + ) + + left, right = df.stack(), Series(np.arange(12), index=mi) + check(left, right) + + tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2) + df.index = MultiIndex.from_tuples(tpls) + mi = MultiIndex( + levels=[["a", "b"], [1, 2], ["1st", "2nd"]], + codes=[ + np.tile(np.arange(2).repeat(3), 2), + np.repeat([1, 0, 1], [3, 6, 3]), + np.tile([0, 1, 0], 4), + ], + ) + + left, right = df.stack(), Series(np.arange(12), index=mi) + check(left, right) + + def test_unstack_odd_failure(self): + data = """day,time,smoker,sum,len +Fri,Dinner,No,8.25,3. +Fri,Dinner,Yes,27.03,9 +Fri,Lunch,No,3.0,1 +Fri,Lunch,Yes,13.68,6 +Sat,Dinner,No,139.63,45 +Sat,Dinner,Yes,120.77,42 +Sun,Dinner,No,180.57,57 +Sun,Dinner,Yes,66.82,19 +Thur,Dinner,No,3.0,1 +Thur,Lunch,No,117.32,44 +Thur,Lunch,Yes,51.51,17""" + + df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"]) + + # it works, #2100 + result = df.unstack(2) + + recons = result.stack() + tm.assert_frame_equal(recons, df) + + def test_stack_mixed_dtype(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + df = frame.T + df["foo", "four"] = "foo" + df = df.sort_index(level=1, axis=1) + + stacked = df.stack() + result = df["foo"].stack().sort_index() + tm.assert_series_equal(stacked["foo"], result, check_names=False) + assert result.name is None + assert stacked["bar"].dtype == np.float_ + + def test_unstack_bug(self): + df = DataFrame( + { + "state": ["naive", "naive", "naive", "activ", "activ", "activ"], + "exp": ["a", "b", "b", "b", "a", "a"], + "barcode": [1, 2, 3, 4, 1, 3], + "v": ["hi", "hi", "bye", "bye", "bye", "peace"], + "extra": np.arange(6.0), + } + ) + + result = df.groupby(["state", "exp", "barcode", "v"]).apply(len) + + unstacked = result.unstack() + restacked = unstacked.stack() + tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float)) + + def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack() + assert unstacked.index.name == "first" + assert unstacked.columns.names == ["exp", "second"] + + restacked = unstacked.stack() + assert restacked.index.names == frame.index.names + + @pytest.mark.parametrize("method", ["stack", "unstack"]) + def test_stack_unstack_wrong_level_name( + self, method, multiindex_dataframe_random_data + ): + # GH 18303 - wrong level name should raise + frame = multiindex_dataframe_random_data + + # A DataFrame with flat axes: + df = frame.loc["foo"] + + with pytest.raises(KeyError, match="does not match index name"): + getattr(df, method)("mistake") + + if method == "unstack": + # Same on a Series: + s = df.iloc[:, 0] + with pytest.raises(KeyError, match="does not match index name"): + getattr(s, method)("mistake") + + def test_unstack_level_name(self, multiindex_dataframe_random_data): + frame 
= multiindex_dataframe_random_data + + result = frame.unstack("second") + expected = frame.unstack(level=1) + tm.assert_frame_equal(result, expected) + + def test_stack_level_name(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack("second") + result = unstacked.stack("exp") + expected = frame.unstack().stack(0) + tm.assert_frame_equal(result, expected) + + result = frame.stack("exp") + expected = frame.stack() + tm.assert_series_equal(result, expected) + + def test_stack_unstack_multiple( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + expected = ymd.unstack("year").unstack("month") + tm.assert_frame_equal(unstacked, expected) + assert unstacked.columns.names == expected.columns.names + + # series + s = ymd["A"] + s_unstacked = s.unstack(["year", "month"]) + tm.assert_frame_equal(s_unstacked, expected["A"]) + + restacked = unstacked.stack(["year", "month"]) + restacked = restacked.swaplevel(0, 1).swaplevel(1, 2) + restacked = restacked.sort_index(level=0) + + tm.assert_frame_equal(restacked, ymd) + assert restacked.index.names == ymd.index.names + + # GH #451 + unstacked = ymd.unstack([1, 2]) + expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected) + + unstacked = ymd.unstack([2, 1]) + expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns]) + + def test_stack_names_and_numbers( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + + # Can't use mixture of names and numbers to stack + with pytest.raises(ValueError, match="level should contain"): + unstacked.stack([0, "month"]) + + def test_stack_multiple_out_of_bounds( + self, multiindex_year_month_day_dataframe_random_data + ): + # nlevels == 3 + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + + with pytest.raises(IndexError, match="Too many levels"): + unstacked.stack([2, 3]) + with pytest.raises(IndexError, match="not a valid level number"): + unstacked.stack([-4, -3]) + + def test_unstack_period_series(self): + # GH4342 + idx1 = pd.PeriodIndex( + ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], + freq="M", + name="period", + ) + idx2 = Index(["A", "B"] * 3, name="str") + value = [1, 2, 3, 4, 5, 6] + + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = pd.PeriodIndex( + ["2013-01", "2013-02", "2013-03"], freq="M", name="period" + ) + expected = DataFrame( + {"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"] + ) + expected.columns.name = "str" + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected.T) + + idx1 = pd.PeriodIndex( + ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], + freq="M", + name="period1", + ) + + idx2 = pd.PeriodIndex( + ["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"], + freq="M", + name="period2", + ) + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = 
pd.PeriodIndex( + ["2013-01", "2013-02", "2013-03"], freq="M", name="period1" + ) + e_cols = pd.PeriodIndex( + ["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"], + freq="M", + name="period2", + ) + expected = DataFrame( + [ + [np.nan, np.nan, np.nan, np.nan, 2, 1], + [np.nan, np.nan, 4, 3, np.nan, np.nan], + [6, 5, np.nan, np.nan, np.nan, np.nan], + ], + index=e_idx, + columns=e_cols, + ) + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected.T) + + def test_unstack_period_frame(self): + # GH4342 + idx1 = pd.PeriodIndex( + ["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"], + freq="M", + name="period1", + ) + idx2 = pd.PeriodIndex( + ["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"], + freq="M", + name="period2", + ) + value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]} + idx = MultiIndex.from_arrays([idx1, idx2]) + df = DataFrame(value, index=idx) + + result1 = df.unstack() + result2 = df.unstack(level=1) + result3 = df.unstack(level=0) + + e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1") + e_2 = pd.PeriodIndex( + ["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"], + freq="M", + name="period2", + ) + e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2]) + expected = DataFrame( + [[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols + ) + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + + e_1 = pd.PeriodIndex( + ["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1" + ) + e_2 = pd.PeriodIndex( + ["2013-10", "2013-12", "2014-02"], freq="M", name="period2" + ) + e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1]) + expected = DataFrame( + [[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols + ) + + tm.assert_frame_equal(result3, expected) + + def test_stack_multiple_bug(self): + # bug when some uniques are not present in the data GH#3170 + id_col = ([1] * 3) + ([2] * 3) + name = (["a"] * 3) + (["b"] * 3) + date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2) + var1 = np.random.randint(0, 100, 6) + df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1)) + + multi = df.set_index(["DATE", "ID"]) + multi.columns.name = "Params" + unst = multi.unstack("ID") + down = unst.resample("W-THU").mean() + + rs = down.stack("ID") + xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID") + xp.columns.name = "Params" + tm.assert_frame_equal(rs, xp) + + def test_stack_dropna(self): + # GH#3997 + df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]}) + df = df.set_index(["A", "B"]) + + stacked = df.unstack().stack(dropna=False) + assert len(stacked) > len(stacked.dropna()) + + stacked = df.unstack().stack(dropna=True) + tm.assert_frame_equal(stacked, stacked.dropna()) + + def test_unstack_multiple_hierarchical(self): + df = DataFrame( + index=[ + [0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 0, 0, 1, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + ], + columns=[[0, 0, 1, 1], [0, 1, 0, 1]], + ) + + df.index.names = ["a", "b", "c"] + df.columns.names = ["d", "e"] + + # it works! 
+ df.unstack(["b", "c"]) + + def test_unstack_sparse_keyspace(self): + # memory problems with naive impl GH#2278 + # Generate Long File & Test Pivot + NUM_ROWS = 1000 + + df = DataFrame( + { + "A": np.random.randint(100, size=NUM_ROWS), + "B": np.random.randint(300, size=NUM_ROWS), + "C": np.random.randint(-7, 7, size=NUM_ROWS), + "D": np.random.randint(-19, 19, size=NUM_ROWS), + "E": np.random.randint(3000, size=NUM_ROWS), + "F": np.random.randn(NUM_ROWS), + } + ) + + idf = df.set_index(["A", "B", "C", "D", "E"]) + + # it works! is sufficient + idf.unstack("E") + + def test_unstack_unobserved_keys(self): + # related to GH#2278 refactoring + levels = [[0, 1], [0, 1, 2, 3]] + codes = [[0, 0, 1, 1], [0, 2, 0, 2]] + + index = MultiIndex(levels, codes) + + df = DataFrame(np.random.randn(4, 2), index=index) + + result = df.unstack() + assert len(result.columns) == 4 + + recons = result.stack() + tm.assert_frame_equal(recons, df) + + @pytest.mark.slow + def test_unstack_number_of_levels_larger_than_int32(self): + # GH#20601 + df = DataFrame( + np.random.randn(2 ** 16, 2), index=[np.arange(2 ** 16), np.arange(2 ** 16)] + ) + with pytest.raises(ValueError, match="int32 overflow"): + df.unstack() + + def test_stack_order_with_unsorted_levels(self): + # GH#16323 + + def manual_compare_stacked(df, df_stacked, lev0, lev1): + assert all( + df.loc[row, col] == df_stacked.loc[(row, col[lev0]), col[lev1]] + for row in df.index + for col in df.columns + ) + + # deep check for 1-row case + for width in [2, 3]: + levels_poss = itertools.product( + itertools.permutations([0, 1, 2], width), repeat=2 + ) + + for levels in levels_poss: + columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) + df = DataFrame(columns=columns, data=[range(4)]) + for stack_lev in range(2): + df_stacked = df.stack(stack_lev) + manual_compare_stacked(df, df_stacked, stack_lev, 1 - stack_lev) + + # check multi-row case + mi = MultiIndex( + levels=[["A", "C", "B"], ["B", "A", "C"]], + codes=[np.repeat(range(3), 3), np.tile(range(3), 3)], + ) + df = DataFrame( + columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1) + ) + manual_compare_stacked(df, df.stack(0), 0, 1) + + def test_stack_unstack_unordered_multiindex(self): + # GH# 18265 + values = np.arange(5) + data = np.vstack( + [ + [f"b{x}" for x in values], # b0, b1, .. + [f"a{x}" for x in values], # a0, a1, .. 
+ ] + ) + df = DataFrame(data.T, columns=["b", "a"]) + df.columns.name = "first" + second_level_dict = {"x": df} + multi_level_df = pd.concat(second_level_dict, axis=1) + multi_level_df.columns.names = ["second", "first"] + df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1) + result = df.stack(["first", "second"]).unstack(["first", "second"]) + expected = DataFrame( + [["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]], + index=[0, 1, 2, 3, 4], + columns=MultiIndex.from_tuples( + [("a", "x"), ("b", "x")], names=["first", "second"] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_preserve_types( + self, multiindex_year_month_day_dataframe_random_data + ): + # GH#403 + ymd = multiindex_year_month_day_dataframe_random_data + ymd["E"] = "foo" + ymd["F"] = 2 + + unstacked = ymd.unstack("month") + assert unstacked["A", 1].dtype == np.float64 + assert unstacked["E", 1].dtype == np.object_ + assert unstacked["F", 1].dtype == np.float64 + + def test_unstack_group_index_overflow(self): + codes = np.tile(np.arange(500), 2) + level = np.arange(500) + + index = MultiIndex( + levels=[level] * 8 + [[0, 1]], + codes=[codes] * 8 + [np.arange(2).repeat(500)], + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack() + assert result.shape == (500, 2) + + # test roundtrip + stacked = result.stack() + tm.assert_series_equal(s, stacked.reindex(s.index)) + + # put it at beginning + index = MultiIndex( + levels=[[0, 1]] + [level] * 8, + codes=[np.arange(2).repeat(500)] + [codes] * 8, + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack(0) + assert result.shape == (500, 2) + + # put it in middle + index = MultiIndex( + levels=[level] * 4 + [[0, 1]] + [level] * 4, + codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4), + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack(4) + assert result.shape == (500, 2) diff --git a/pandas/tests/reshape/merge/test_pivot_old.py b/pandas/tests/reshape/merge/test_pivot_old.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index cfe969b5f61bb..603f81f6fc6d4 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2063,3 +2063,94 @@ def agg(l): foo = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]}) with pytest.raises(KeyError, match="notpresent"): foo.pivot_table("notpresent", "X", "Y", aggfunc=agg) + + +class TestPivot: + def test_pivot(self): + data = { + "index": ["A", "B", "C", "C", "B", "A"], + "columns": ["One", "One", "One", "Two", "Two", "Two"], + "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + } + + frame = DataFrame(data) + pivoted = frame.pivot(index="index", columns="columns", values="values") + + expected = DataFrame( + { + "One": {"A": 1.0, "B": 2.0, "C": 3.0}, + "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, + } + ) + + expected.index.name, expected.columns.name = "index", "columns" + tm.assert_frame_equal(pivoted, expected) + + # name tracking + assert pivoted.index.name == "index" + assert pivoted.columns.name == "columns" + + # don't specify values + pivoted = frame.pivot(index="index", columns="columns") + assert pivoted.index.name == "index" + assert pivoted.columns.names == (None, "columns") + + def test_pivot_duplicates(self): + data = DataFrame( + { + "a": ["bar", "bar", "foo", "foo", "foo"], + "b": ["one", "two", "one", "one", "two"], + "c": [1.0, 2.0, 3.0, 3.0, 4.0], + } + ) + with 
pytest.raises(ValueError, match="duplicate entries"): + data.pivot("a", "b", "c") + + def test_pivot_empty(self): + df = DataFrame(columns=["a", "b", "c"]) + result = df.pivot("a", "b", "c") + expected = DataFrame() + tm.assert_frame_equal(result, expected, check_names=False) + + def test_pivot_integer_bug(self): + df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")]) + + result = df.pivot(index=1, columns=0, values=2) + repr(result) + tm.assert_index_equal(result.columns, Index(["A", "B"], name=0)) + + def test_pivot_index_none(self): + # GH#3962 + data = { + "index": ["A", "B", "C", "C", "B", "A"], + "columns": ["One", "One", "One", "Two", "Two", "Two"], + "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + } + + frame = DataFrame(data).set_index("index") + result = frame.pivot(columns="columns", values="values") + expected = DataFrame( + { + "One": {"A": 1.0, "B": 2.0, "C": 3.0}, + "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, + } + ) + + expected.index.name, expected.columns.name = "index", "columns" + tm.assert_frame_equal(result, expected) + + # omit values + result = frame.pivot(columns="columns") + + expected.columns = pd.MultiIndex.from_tuples( + [("values", "One"), ("values", "Two")], names=[None, "columns"] + ) + expected.index.name = "index" + tm.assert_frame_equal(result, expected, check_names=False) + assert result.index.name == "index" + assert result.columns.names == (None, "columns") + expected.columns = expected.columns.droplevel(0) + result = frame.pivot(columns="columns", values="values") + + expected.columns.name = "columns" + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 909839dd5605e..f3d1f949c1475 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1,6 +1,5 @@ import datetime from io import StringIO -import itertools from itertools import product import numpy as np @@ -252,253 +251,6 @@ def test_get_level_number_out_of_bounds(self, multiindex_dataframe_random_data): with pytest.raises(IndexError, match="not a valid level number"): frame.index._get_level_number(-3) - def test_unstack(self, multiindex_year_month_day_dataframe_random_data): - # just check that it works for now - ymd = multiindex_year_month_day_dataframe_random_data - - unstacked = ymd.unstack() - unstacked.unstack() - - # test that ints work - ymd.astype(int).unstack() - - # test that int32 work - ymd.astype(np.int32).unstack() - - @pytest.mark.parametrize( - "result_rows,result_columns,index_product,expected_row", - [ - ( - [[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]], - ["ix1", "ix2", "col1", "col2", "col3", "col4"], - 2, - [None, None, 30.0, None], - ), - ( - [[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]], - ["ix1", "ix2", "col1", "col2", "col3"], - 2, - [None, None, 30.0], - ), - ( - [[1, 1, None, None, 30.0], [2, None, None, None, 30.0]], - ["ix1", "ix2", "col1", "col2", "col3"], - None, - [None, None, 30.0], - ), - ], - ) - def test_unstack_partial( - self, result_rows, result_columns, index_product, expected_row - ): - # check for regressions on this issue: - # https://github.com/pandas-dev/pandas/issues/19351 - # make sure DataFrame.unstack() works when its run on a subset of the DataFrame - # and the Index levels contain values that are not present in the subset - result = DataFrame(result_rows, columns=result_columns).set_index( - ["ix1", "ix2"] - ) - result = result.iloc[1:2].unstack("ix2") - expected = DataFrame( - [expected_row], - 
columns=pd.MultiIndex.from_product( - [result_columns[2:], [index_product]], names=[None, "ix2"] - ), - index=pd.Index([2], name="ix1"), - ) - tm.assert_frame_equal(result, expected) - - def test_unstack_multiple_no_empty_columns(self): - index = MultiIndex.from_tuples( - [(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)] - ) - - s = Series(np.random.randn(4), index=index) - - unstacked = s.unstack([1, 2]) - expected = unstacked.dropna(axis=1, how="all") - tm.assert_frame_equal(unstacked, expected) - - def test_stack(self, multiindex_year_month_day_dataframe_random_data): - ymd = multiindex_year_month_day_dataframe_random_data - - # regular roundtrip - unstacked = ymd.unstack() - restacked = unstacked.stack() - tm.assert_frame_equal(restacked, ymd) - - unlexsorted = ymd.sort_index(level=2) - - unstacked = unlexsorted.unstack(2) - restacked = unstacked.stack() - tm.assert_frame_equal(restacked.sort_index(level=0), ymd) - - unlexsorted = unlexsorted[::-1] - unstacked = unlexsorted.unstack(1) - restacked = unstacked.stack().swaplevel(1, 2) - tm.assert_frame_equal(restacked.sort_index(level=0), ymd) - - unlexsorted = unlexsorted.swaplevel(0, 1) - unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1) - restacked = unstacked.stack(0).swaplevel(1, 2) - tm.assert_frame_equal(restacked.sort_index(level=0), ymd) - - # columns unsorted - unstacked = ymd.unstack() - unstacked = unstacked.sort_index(axis=1, ascending=False) - restacked = unstacked.stack() - tm.assert_frame_equal(restacked, ymd) - - # more than 2 levels in the columns - unstacked = ymd.unstack(1).unstack(1) - - result = unstacked.stack(1) - expected = ymd.unstack() - tm.assert_frame_equal(result, expected) - - result = unstacked.stack(2) - expected = ymd.unstack(1) - tm.assert_frame_equal(result, expected) - - result = unstacked.stack(0) - expected = ymd.stack().unstack(1).unstack(1) - tm.assert_frame_equal(result, expected) - - # not all levels present in each echelon - unstacked = ymd.unstack(2).loc[:, ::3] - stacked = unstacked.stack().stack() - ymd_stacked = ymd.stack() - tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index)) - - # stack with negative number - result = ymd.unstack(0).stack(-2) - expected = ymd.unstack(0).stack(0) - - # GH10417 - def check(left, right): - tm.assert_series_equal(left, right) - assert left.index.is_unique is False - li, ri = left.index, right.index - tm.assert_index_equal(li, ri) - - df = DataFrame( - np.arange(12).reshape(4, 3), - index=list("abab"), - columns=["1st", "2nd", "3rd"], - ) - - mi = MultiIndex( - levels=[["a", "b"], ["1st", "2nd", "3rd"]], - codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)], - ) - - left, right = df.stack(), Series(np.arange(12), index=mi) - check(left, right) - - df.columns = ["1st", "2nd", "1st"] - mi = MultiIndex( - levels=[["a", "b"], ["1st", "2nd"]], - codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)], - ) - - left, right = df.stack(), Series(np.arange(12), index=mi) - check(left, right) - - tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2) - df.index = MultiIndex.from_tuples(tpls) - mi = MultiIndex( - levels=[["a", "b"], [1, 2], ["1st", "2nd"]], - codes=[ - np.tile(np.arange(2).repeat(3), 2), - np.repeat([1, 0, 1], [3, 6, 3]), - np.tile([0, 1, 0], 4), - ], - ) - - left, right = df.stack(), Series(np.arange(12), index=mi) - check(left, right) - - def test_unstack_odd_failure(self): - data = """day,time,smoker,sum,len -Fri,Dinner,No,8.25,3. 
-Fri,Dinner,Yes,27.03,9 -Fri,Lunch,No,3.0,1 -Fri,Lunch,Yes,13.68,6 -Sat,Dinner,No,139.63,45 -Sat,Dinner,Yes,120.77,42 -Sun,Dinner,No,180.57,57 -Sun,Dinner,Yes,66.82,19 -Thur,Dinner,No,3.0,1 -Thur,Lunch,No,117.32,44 -Thur,Lunch,Yes,51.51,17""" - - df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"]) - - # it works, #2100 - result = df.unstack(2) - - recons = result.stack() - tm.assert_frame_equal(recons, df) - - def test_stack_mixed_dtype(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - df = frame.T - df["foo", "four"] = "foo" - df = df.sort_index(level=1, axis=1) - - stacked = df.stack() - result = df["foo"].stack().sort_index() - tm.assert_series_equal(stacked["foo"], result, check_names=False) - assert result.name is None - assert stacked["bar"].dtype == np.float_ - - def test_unstack_bug(self): - df = DataFrame( - { - "state": ["naive", "naive", "naive", "activ", "activ", "activ"], - "exp": ["a", "b", "b", "b", "a", "a"], - "barcode": [1, 2, 3, 4, 1, 3], - "v": ["hi", "hi", "bye", "bye", "bye", "peace"], - "extra": np.arange(6.0), - } - ) - - result = df.groupby(["state", "exp", "barcode", "v"]).apply(len) - - unstacked = result.unstack() - restacked = unstacked.stack() - tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float)) - - def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - unstacked = frame.unstack() - assert unstacked.index.name == "first" - assert unstacked.columns.names == ["exp", "second"] - - restacked = unstacked.stack() - assert restacked.index.names == frame.index.names - - @pytest.mark.parametrize("method", ["stack", "unstack"]) - def test_stack_unstack_wrong_level_name( - self, method, multiindex_dataframe_random_data - ): - # GH 18303 - wrong level name should raise - frame = multiindex_dataframe_random_data - - # A DataFrame with flat axes: - df = frame.loc["foo"] - - with pytest.raises(KeyError, match="does not match index name"): - getattr(df, method)("mistake") - - if method == "unstack": - # Same on a Series: - s = df.iloc[:, 0] - with pytest.raises(KeyError, match="does not match index name"): - getattr(s, method)("mistake") - def test_unused_level_raises(self): # GH 20410 mi = MultiIndex( @@ -510,241 +262,6 @@ def test_unused_level_raises(self): with pytest.raises(KeyError, match="notevenone"): df["notevenone"] - def test_unstack_level_name(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - result = frame.unstack("second") - expected = frame.unstack(level=1) - tm.assert_frame_equal(result, expected) - - def test_stack_level_name(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - unstacked = frame.unstack("second") - result = unstacked.stack("exp") - expected = frame.unstack().stack(0) - tm.assert_frame_equal(result, expected) - - result = frame.stack("exp") - expected = frame.stack() - tm.assert_series_equal(result, expected) - - def test_stack_unstack_multiple( - self, multiindex_year_month_day_dataframe_random_data - ): - ymd = multiindex_year_month_day_dataframe_random_data - - unstacked = ymd.unstack(["year", "month"]) - expected = ymd.unstack("year").unstack("month") - tm.assert_frame_equal(unstacked, expected) - assert unstacked.columns.names == expected.columns.names - - # series - s = ymd["A"] - s_unstacked = s.unstack(["year", "month"]) - tm.assert_frame_equal(s_unstacked, expected["A"]) - - restacked = 
unstacked.stack(["year", "month"]) - restacked = restacked.swaplevel(0, 1).swaplevel(1, 2) - restacked = restacked.sort_index(level=0) - - tm.assert_frame_equal(restacked, ymd) - assert restacked.index.names == ymd.index.names - - # GH #451 - unstacked = ymd.unstack([1, 2]) - expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all") - tm.assert_frame_equal(unstacked, expected) - - unstacked = ymd.unstack([2, 1]) - expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all") - tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns]) - - def test_stack_names_and_numbers( - self, multiindex_year_month_day_dataframe_random_data - ): - ymd = multiindex_year_month_day_dataframe_random_data - - unstacked = ymd.unstack(["year", "month"]) - - # Can't use mixture of names and numbers to stack - with pytest.raises(ValueError, match="level should contain"): - unstacked.stack([0, "month"]) - - def test_stack_multiple_out_of_bounds( - self, multiindex_year_month_day_dataframe_random_data - ): - # nlevels == 3 - ymd = multiindex_year_month_day_dataframe_random_data - - unstacked = ymd.unstack(["year", "month"]) - - with pytest.raises(IndexError, match="Too many levels"): - unstacked.stack([2, 3]) - with pytest.raises(IndexError, match="not a valid level number"): - unstacked.stack([-4, -3]) - - def test_unstack_period_series(self): - # GH 4342 - idx1 = pd.PeriodIndex( - ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], - freq="M", - name="period", - ) - idx2 = Index(["A", "B"] * 3, name="str") - value = [1, 2, 3, 4, 5, 6] - - idx = MultiIndex.from_arrays([idx1, idx2]) - s = Series(value, index=idx) - - result1 = s.unstack() - result2 = s.unstack(level=1) - result3 = s.unstack(level=0) - - e_idx = pd.PeriodIndex( - ["2013-01", "2013-02", "2013-03"], freq="M", name="period" - ) - expected = DataFrame( - {"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"] - ) - expected.columns.name = "str" - - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected.T) - - idx1 = pd.PeriodIndex( - ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], - freq="M", - name="period1", - ) - - idx2 = pd.PeriodIndex( - ["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"], - freq="M", - name="period2", - ) - idx = MultiIndex.from_arrays([idx1, idx2]) - s = Series(value, index=idx) - - result1 = s.unstack() - result2 = s.unstack(level=1) - result3 = s.unstack(level=0) - - e_idx = pd.PeriodIndex( - ["2013-01", "2013-02", "2013-03"], freq="M", name="period1" - ) - e_cols = pd.PeriodIndex( - ["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"], - freq="M", - name="period2", - ) - expected = DataFrame( - [ - [np.nan, np.nan, np.nan, np.nan, 2, 1], - [np.nan, np.nan, 4, 3, np.nan, np.nan], - [6, 5, np.nan, np.nan, np.nan, np.nan], - ], - index=e_idx, - columns=e_cols, - ) - - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - tm.assert_frame_equal(result3, expected.T) - - def test_unstack_period_frame(self): - # GH 4342 - idx1 = pd.PeriodIndex( - ["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"], - freq="M", - name="period1", - ) - idx2 = pd.PeriodIndex( - ["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"], - freq="M", - name="period2", - ) - value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]} - idx = MultiIndex.from_arrays([idx1, idx2]) - df = DataFrame(value, index=idx) - - result1 = df.unstack() - 
result2 = df.unstack(level=1) - result3 = df.unstack(level=0) - - e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1") - e_2 = pd.PeriodIndex( - ["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"], - freq="M", - name="period2", - ) - e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2]) - expected = DataFrame( - [[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols - ) - - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - - e_1 = pd.PeriodIndex( - ["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1" - ) - e_2 = pd.PeriodIndex( - ["2013-10", "2013-12", "2014-02"], freq="M", name="period2" - ) - e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1]) - expected = DataFrame( - [[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols - ) - - tm.assert_frame_equal(result3, expected) - - def test_stack_multiple_bug(self): - """ bug when some uniques are not present in the data #3170""" - id_col = ([1] * 3) + ([2] * 3) - name = (["a"] * 3) + (["b"] * 3) - date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2) - var1 = np.random.randint(0, 100, 6) - df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1)) - - multi = df.set_index(["DATE", "ID"]) - multi.columns.name = "Params" - unst = multi.unstack("ID") - down = unst.resample("W-THU").mean() - - rs = down.stack("ID") - xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID") - xp.columns.name = "Params" - tm.assert_frame_equal(rs, xp) - - def test_stack_dropna(self): - # GH #3997 - df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]}) - df = df.set_index(["A", "B"]) - - stacked = df.unstack().stack(dropna=False) - assert len(stacked) > len(stacked.dropna()) - - stacked = df.unstack().stack(dropna=True) - tm.assert_frame_equal(stacked, stacked.dropna()) - - def test_unstack_multiple_hierarchical(self): - df = DataFrame( - index=[ - [0, 0, 0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 0, 0, 1, 1], - [0, 1, 0, 1, 0, 1, 0, 1], - ], - columns=[[0, 0, 1, 1], [0, 1, 0, 1]], - ) - - df.index.names = ["a", "b", "c"] - df.columns.names = ["d", "e"] - - # it works! - df.unstack(["b", "c"]) - def test_groupby_transform(self, multiindex_dataframe_random_data): frame = multiindex_dataframe_random_data @@ -758,109 +275,6 @@ def test_groupby_transform(self, multiindex_dataframe_random_data): result = applied.reindex(expected.index) tm.assert_series_equal(result, expected, check_names=False) - def test_unstack_sparse_keyspace(self): - # memory problems with naive impl #2278 - # Generate Long File & Test Pivot - NUM_ROWS = 1000 - - df = DataFrame( - { - "A": np.random.randint(100, size=NUM_ROWS), - "B": np.random.randint(300, size=NUM_ROWS), - "C": np.random.randint(-7, 7, size=NUM_ROWS), - "D": np.random.randint(-19, 19, size=NUM_ROWS), - "E": np.random.randint(3000, size=NUM_ROWS), - "F": np.random.randn(NUM_ROWS), - } - ) - - idf = df.set_index(["A", "B", "C", "D", "E"]) - - # it works! 
is sufficient - idf.unstack("E") - - def test_unstack_unobserved_keys(self): - # related to #2278 refactoring - levels = [[0, 1], [0, 1, 2, 3]] - codes = [[0, 0, 1, 1], [0, 2, 0, 2]] - - index = MultiIndex(levels, codes) - - df = DataFrame(np.random.randn(4, 2), index=index) - - result = df.unstack() - assert len(result.columns) == 4 - - recons = result.stack() - tm.assert_frame_equal(recons, df) - - @pytest.mark.slow - def test_unstack_number_of_levels_larger_than_int32(self): - # GH 20601 - df = DataFrame( - np.random.randn(2 ** 16, 2), index=[np.arange(2 ** 16), np.arange(2 ** 16)] - ) - with pytest.raises(ValueError, match="int32 overflow"): - df.unstack() - - def test_stack_order_with_unsorted_levels(self): - # GH 16323 - - def manual_compare_stacked(df, df_stacked, lev0, lev1): - assert all( - df.loc[row, col] == df_stacked.loc[(row, col[lev0]), col[lev1]] - for row in df.index - for col in df.columns - ) - - # deep check for 1-row case - for width in [2, 3]: - levels_poss = itertools.product( - itertools.permutations([0, 1, 2], width), repeat=2 - ) - - for levels in levels_poss: - columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) - df = DataFrame(columns=columns, data=[range(4)]) - for stack_lev in range(2): - df_stacked = df.stack(stack_lev) - manual_compare_stacked(df, df_stacked, stack_lev, 1 - stack_lev) - - # check multi-row case - mi = MultiIndex( - levels=[["A", "C", "B"], ["B", "A", "C"]], - codes=[np.repeat(range(3), 3), np.tile(range(3), 3)], - ) - df = DataFrame( - columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1) - ) - manual_compare_stacked(df, df.stack(0), 0, 1) - - def test_stack_unstack_unordered_multiindex(self): - # GH 18265 - values = np.arange(5) - data = np.vstack( - [ - [f"b{x}" for x in values], # b0, b1, .. - [f"a{x}" for x in values], # a0, a1, .. 
- ] - ) - df = DataFrame(data.T, columns=["b", "a"]) - df.columns.name = "first" - second_level_dict = {"x": df} - multi_level_df = pd.concat(second_level_dict, axis=1) - multi_level_df.columns.names = ["second", "first"] - df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1) - result = df.stack(["first", "second"]).unstack(["first", "second"]) - expected = DataFrame( - [["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]], - index=[0, 1, 2, 3, 4], - columns=MultiIndex.from_tuples( - [("a", "x"), ("b", "x")], names=["first", "second"] - ), - ) - tm.assert_frame_equal(result, expected) - def test_groupby_corner(self): midx = MultiIndex( levels=[["foo"], ["bar"], ["baz"]], @@ -1130,56 +544,6 @@ def test_loc_preserve_names(self, multiindex_year_month_day_dataframe_random_dat assert result.index.name == ymd.index.names[2] assert result2.index.name == ymd.index.names[2] - def test_unstack_preserve_types( - self, multiindex_year_month_day_dataframe_random_data - ): - # GH #403 - ymd = multiindex_year_month_day_dataframe_random_data - ymd["E"] = "foo" - ymd["F"] = 2 - - unstacked = ymd.unstack("month") - assert unstacked["A", 1].dtype == np.float64 - assert unstacked["E", 1].dtype == np.object_ - assert unstacked["F", 1].dtype == np.float64 - - def test_unstack_group_index_overflow(self): - codes = np.tile(np.arange(500), 2) - level = np.arange(500) - - index = MultiIndex( - levels=[level] * 8 + [[0, 1]], - codes=[codes] * 8 + [np.arange(2).repeat(500)], - ) - - s = Series(np.arange(1000), index=index) - result = s.unstack() - assert result.shape == (500, 2) - - # test roundtrip - stacked = result.stack() - tm.assert_series_equal(s, stacked.reindex(s.index)) - - # put it at beginning - index = MultiIndex( - levels=[[0, 1]] + [level] * 8, - codes=[np.arange(2).repeat(500)] + [codes] * 8, - ) - - s = Series(np.arange(1000), index=index) - result = s.unstack(0) - assert result.shape == (500, 2) - - # put it in middle - index = MultiIndex( - levels=[level] * 4 + [[0, 1]] + [level] * 4, - codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4), - ) - - s = Series(np.arange(1000), index=index) - result = s.unstack(4) - assert result.shape == (500, 2) - def test_to_html(self, multiindex_year_month_day_dataframe_random_data): ymd = multiindex_year_month_day_dataframe_random_data
https://api.github.com/repos/pandas-dev/pandas/pulls/37314
2020-10-21T15:47:36Z
2020-10-22T00:18:10Z
2020-10-22T00:18:10Z
2020-10-22T01:13:12Z
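The relocated tests in the record above revolve around DataFrame.pivot and the stack/unstack round trip. A minimal illustrative sketch of the behaviours they assert, written against the public pandas API (not taken from the PR itself):

import numpy as np
import pandas as pd

# pivot: reshape long-form data so one column supplies the new column labels
long_df = pd.DataFrame(
    {
        "index": ["A", "B", "C", "C", "B", "A"],
        "columns": ["One", "One", "One", "Two", "Two", "Two"],
        "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
    }
)
pivoted = long_df.pivot(index="index", columns="columns", values="values")
assert pivoted.loc["B", "Two"] == 2.0

# unstack moves the innermost index level into the columns; stack reverses
# it, so a frame over a full MultiIndex product round-trips exactly
mi = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"])
frame = pd.DataFrame(np.arange(8).reshape(4, 2), index=mi, columns=["x", "y"])
assert frame.unstack().stack().equals(frame)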
CLN: always rebox_native in DatetimeLikeArray
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f2a0173c0d593..03cf97bde774e 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -480,8 +480,7 @@ def _validate_fill_value(self, fill_value): fill_value = self._validate_scalar(fill_value, msg) except TypeError as err: raise ValueError(msg) from err - rv = self._unbox(fill_value) - return self._rebox_native(rv) + return self._unbox(fill_value) def _validate_shift_value(self, fill_value): # TODO(2.0): once this deprecation is enforced, use _validate_fill_value @@ -508,8 +507,7 @@ def _validate_shift_value(self, fill_value): ) fill_value = new_fill - rv = self._unbox(fill_value) - return self._rebox_native(rv) + return self._unbox(fill_value) def _validate_scalar(self, value, msg: Optional[str] = None): """ @@ -591,8 +589,7 @@ def _validate_searchsorted_value(self, value): else: value = self._validate_listlike(value) - rv = self._unbox(value) - return self._rebox_native(rv) + return self._unbox(value) def _validate_setitem_value(self, value): msg = ( @@ -604,15 +601,13 @@ def _validate_setitem_value(self, value): else: value = self._validate_scalar(value, msg) - rv = self._unbox(value, setitem=True) - return self._rebox_native(rv) + return self._unbox(value, setitem=True) def _validate_insert_value(self, value): msg = f"cannot insert {type(self).__name__} with incompatible label" value = self._validate_scalar(value, msg) - rv = self._unbox(value, setitem=True) - return self._rebox_native(rv) + return self._unbox(value, setitem=True) def _validate_where_value(self, other): msg = f"Where requires matching dtype, not {type(other)}" @@ -621,19 +616,21 @@ def _validate_where_value(self, other): else: other = self._validate_listlike(other) - rv = self._unbox(other, setitem=True) - return self._rebox_native(rv) + return self._unbox(other, setitem=True) - def _unbox(self, other, setitem: bool = False) -> Union[np.int64, np.ndarray]: + def _unbox( + self, other, setitem: bool = False + ) -> Union[np.int64, np.datetime64, np.timedelta64, np.ndarray]: """ Unbox either a scalar with _unbox_scalar or an instance of our own type. """ if lib.is_scalar(other): other = self._unbox_scalar(other, setitem=setitem) + other = self._rebox_native(other) else: # same type as self self._check_compatible_with(other, setitem=setitem) - other = other.view("i8") + other = other._ndarray return other # ------------------------------------------------------------------ @@ -862,8 +859,8 @@ def _cmp_method(self, other, op): ) return result - other_i8 = self._unbox(other) - result = op(self.asi8, other_i8) + other_vals = self._unbox(other) + result = op(self._ndarray, other_vals) o_mask = isna(other) if self._hasnans | np.any(o_mask): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index f67d1ec0aa65d..76d001d2f3ce6 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -412,8 +412,8 @@ def _partial_date_slice( self._validate_partial_date_slice(reso) t1, t2 = self._parsed_string_to_bounds(reso, parsed) - i8vals = self.asi8 - unbox = self._data._unbox_scalar + vals = self._data._ndarray + unbox = self._data._unbox if self.is_monotonic: @@ -426,14 +426,13 @@ def _partial_date_slice( # TODO: does this depend on being monotonic _increasing_? 
# a monotonic (sorted) series can be sliced - # Use asi8.searchsorted to avoid re-validating Periods/Timestamps - left = i8vals.searchsorted(unbox(t1), side="left") - right = i8vals.searchsorted(unbox(t2), side="right") + left = vals.searchsorted(unbox(t1), side="left") + right = vals.searchsorted(unbox(t2), side="right") return slice(left, right) else: - lhs_mask = i8vals >= unbox(t1) - rhs_mask = i8vals <= unbox(t2) + lhs_mask = vals >= unbox(t1) + rhs_mask = vals <= unbox(t2) # try to find the dates return (lhs_mask & rhs_mask).nonzero()[0]
https://api.github.com/repos/pandas-dev/pandas/pulls/37313
2020-10-21T15:34:22Z
2020-10-22T00:05:19Z
2020-10-22T00:05:19Z
2020-12-02T18:59:59Z
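The refactor in the record above moves comparisons and searchsorted calls from the int64 ("i8") view of datetime-like arrays onto their native datetime64 values. A small sketch, using only public API rather than the PR's internal _unbox/_rebox_native helpers, of the equivalence it relies on:

import pandas as pd

dti = pd.DatetimeIndex(["2020-01-01", "2020-01-02", "2020-01-03"])
ts = pd.Timestamp("2020-01-02")

# comparing the raw datetime64[ns] values yields the same mask as the
# old int64 ("i8") comparison, so the i8 round trip can be dropped
vals = dti.to_numpy()                     # datetime64[ns] ndarray
mask_native = vals >= ts.to_datetime64()  # scalar "unboxed" to np.datetime64
mask_i8 = dti.asi8 >= ts.value            # the pre-refactor style comparison
assert (mask_native == mask_i8).all()

# searchsorted likewise works directly on the native values
assert vals.searchsorted(ts.to_datetime64(), side="left") == 1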
CI: troubleshoot travis-36-cov on 1.1.x
diff --git a/.travis.yml b/.travis.yml index f43f4a1d16ff8..f16b19814874e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -46,7 +46,7 @@ matrix: # Enabling Deprecations when running tests # PANDAS_TESTING_MODE="deprecate" causes DeprecationWarning messages to be displayed in the logs # See pandas/_testing.py for more details. - - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1" + - JOB="3.7, coverage" ENV_FILE="ci/deps/travis-37-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1" services: - mysql - postgresql diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-37-cov.yaml similarity index 79% rename from ci/deps/travis-36-cov.yaml rename to ci/deps/travis-37-cov.yaml index 8c8db106af05c..c89b42ef06a2e 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -1,9 +1,8 @@ name: pandas-dev channels: - - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.7.* # tools - cython>=0.29.21 @@ -22,18 +21,21 @@ dependencies: - geopandas - html5lib - matplotlib - - moto + - moto>=1.3.14 + - flask - nomkl - numexpr - - numpy=1.15.* + - numpy=1.16.* - odfpy - openpyxl - pandas-gbq + - google-cloud-bigquery>=1.27.2 # GH 36436 - psycopg2 - - pyarrow>=0.13.0 - - pymysql + - pyarrow>=0.15.0 + - pymysql<0.10.0 # temporary pin, GH 36465 - pytables - python-snappy + - python-dateutil - pytz - s3fs>=0.4.0 - scikit-learn @@ -49,5 +51,4 @@ dependencies: - brotlipy - coverage - pandas-datareader - - python-dateutil - pyxlsb diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 0d447a70b540d..eb3847e9ac19f 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -3,8 +3,6 @@ import numpy as np import pytest -from pandas.compat import PY37, is_platform_windows - import pandas as pd from pandas import ( Categorical, @@ -13,7 +11,6 @@ Index, MultiIndex, Series, - _np_version_under1p17, qcut, ) import pandas._testing as tm @@ -244,12 +241,6 @@ def test_level_get_group(observed): tm.assert_frame_equal(result, expected) -# GH#21636 flaky on py37; may be related to older numpy, see discussion -# https://github.com/MacPython/pandas-wheels/pull/64 -@pytest.mark.xfail( - PY37 and _np_version_under1p17 and not is_platform_windows(), - reason="Flaky, GH-27902", -) @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138
closes #37309
https://api.github.com/repos/pandas-dev/pandas/pulls/37311
2020-10-21T13:37:23Z
2020-11-26T13:59:15Z
2020-11-26T13:59:15Z
2020-11-26T13:59:20Z
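Besides the Python and dependency bumps, the diff above also deletes a conditional xfail marker from test_categorical.py. For reference, a minimal sketch of that pytest pattern; ON_FLAKY_ENV here is a made-up stand-in for the removed PY37/numpy/platform condition:

import pytest

# hypothetical stand-in for the environment check the PR removed
ON_FLAKY_ENV = False

# with a boolean condition, the marker only downgrades failures on matching
# environments to expected failures; everywhere else the test must pass
@pytest.mark.xfail(ON_FLAKY_ENV, reason="Flaky, GH-27902")
def test_apply_like():
    assert 1 + 1 == 2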
BUG: stabilize sort_values algorithms for Series and time-like Indices
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f9e6a86e4f02d..3937b6e281a42 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -309,6 +309,13 @@ Optional libraries below the lowest tested version may still work, but are not c See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more. +.. _whatsnew_120.api.other: + +Other API changes +^^^^^^^^^^^^^^^^^ + +- Sorting in descending order is now stable for :meth:`Series.sort_values` and :meth:`Index.sort_values` for DateTime-like :class:`Index` subclasses. This will affect sort order when sorting :class:`DataFrame` on multiple columns, sorting with a key function that produces duplicates, or requesting the sorting index when using :meth:`Index.sort_values`. When using :meth:`Series.value_counts`, the count of missing values is no longer the last in the list of duplicate counts, and its position corresponds to the position in the original :class:`Series`. When using :meth:`Index.sort_values` for DateTime-like :class:`Index` subclasses, NaTs ignored the ``na_position`` argument and were sorted to the beginning. Now they respect ``na_position``, the default being ``last``, same as other :class:`Index` subclasses. (:issue:`35992`) + .. --------------------------------------------------------------------------- .. _whatsnew_120.deprecations: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index a310ec5312cf4..e9e04ace784b6 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1181,10 +1181,8 @@ def compute(self, method: str) -> Series: # slow method if n >= len(self.obj): - reverse_it = self.keep == "last" or method == "nlargest" ascending = method == "nsmallest" - slc = np.s_[::-1] if reverse_it else np.s_[:] - return dropped[slc].sort_values(ascending=ascending).head(n) + return dropped.sort_values(ascending=ascending).head(n) # fast method arr, pandas_dtype = _ensure_data(dropped.values) diff --git a/pandas/core/base.py b/pandas/core/base.py index a22aac926e36e..c91e4db004f2a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -933,9 +933,9 @@ def value_counts( >>> index = pd.Index([3, 1, 2, 3, 4, np.nan]) >>> index.value_counts() 3.0 2 - 4.0 1 - 2.0 1 1.0 1 + 2.0 1 + 4.0 1 dtype: int64 With `normalize` set to `True`, returns the relative frequency by @@ -944,9 +944,9 @@ >>> s = pd.Series([3, 1, 2, 3, 4, np.nan]) >>> s.value_counts(normalize=True) 3.0 0.4 - 4.0 0.2 - 2.0 0.2 1.0 0.2 + 2.0 0.2 + 4.0 0.2 dtype: float64 **bins** @@ -957,8 +957,8 @@ number of half-open bins.
>>> s.value_counts(bins=3) - (2.0, 3.0] 2 (0.996, 2.0] 2 + (2.0, 3.0] 2 (3.0, 4.0] 1 dtype: int64 @@ -968,10 +968,10 @@ def value_counts( >>> s.value_counts(dropna=False) 3.0 2 - NaN 1 - 4.0 1 - 2.0 1 1.0 1 + 2.0 1 + 4.0 1 + NaN 1 dtype: int64 """ result = value_counts( diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c13164a4c4f85..5134529d9c21f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5563,8 +5563,8 @@ def value_counts( >>> df.value_counts() num_legs num_wings 4 0 2 - 6 0 1 2 2 1 + 6 0 1 dtype: int64 >>> df.value_counts(sort=False) @@ -5584,8 +5584,8 @@ def value_counts( >>> df.value_counts(normalize=True) num_legs num_wings 4 0 0.50 - 6 0 0.25 2 2 0.25 + 6 0 0.25 dtype: float64 """ if subset is None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c4ca34c3b74a6..c90ab9cceea8c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10106,7 +10106,7 @@ def describe( categorical count 3 unique 3 - top f + top d freq 1 Excluding numeric columns from a ``DataFrame`` description. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d1a52011a3ad7..20a17c27fe282 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4542,9 +4542,7 @@ def sort_values( # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex - if not isinstance( - self, (ABCMultiIndex, ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex) - ): + if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) diff --git a/pandas/core/series.py b/pandas/core/series.py index 2cd861cc11b28..19d07a8c5e6bf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -92,7 +92,7 @@ from pandas.core.indexing import check_bool_indexer from pandas.core.internals import SingleBlockManager from pandas.core.shared_docs import _shared_docs -from pandas.core.sorting import ensure_key_mapped +from pandas.core.sorting import ensure_key_mapped, nargsort from pandas.core.strings import StringMethods from pandas.core.tools.datetimes import to_datetime @@ -3288,29 +3288,6 @@ def sort_values( "sort in-place you must create a copy" ) - def _try_kind_sort(arr): - arr = ensure_key_mapped(arr, key) - arr = getattr(arr, "_values", arr) - - # easier to ask forgiveness than permission - try: - # if kind==mergesort, it can fail for object dtype - return arr.argsort(kind=kind) - except TypeError: - # stable sort not available for object dtype - # uses the argsort default quicksort - return arr.argsort(kind="quicksort") - - arr = self._values - sorted_index = np.empty(len(self), dtype=np.int32) - - bad = isna(arr) - - good = ~bad - idx = ibase.default_index(len(self)) - - argsorted = _try_kind_sort(self[good]) - if is_list_like(ascending): if len(ascending) != 1: raise ValueError( @@ -3321,21 +3298,16 @@ def _try_kind_sort(arr): if not is_bool(ascending): raise ValueError("ascending must be boolean") - if not ascending: - argsorted = argsorted[::-1] - - if na_position == "last": - n = good.sum() - sorted_index[:n] = idx[good][argsorted] - sorted_index[n:] = idx[bad] - elif na_position == "first": - n = bad.sum() - sorted_index[n:] = idx[good][argsorted] - sorted_index[:n] = idx[bad] - else: + if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") - result = self._constructor(arr[sorted_index], index=self.index[sorted_index]) + # GH 35922. 
Make sorting stable by leveraging nargsort + values_to_sort = ensure_key_mapped(self, key)._values if key else self._values + sorted_index = nargsort(values_to_sort, kind, ascending, na_position) + + result = self._constructor( + self._values[sorted_index], index=self.index[sorted_index] + ) if ignore_index: result.index = ibase.default_index(len(sorted_index)) diff --git a/pandas/tests/arrays/boolean/test_function.py b/pandas/tests/arrays/boolean/test_function.py index 1547f08fa66b0..7665c350e3443 100644 --- a/pandas/tests/arrays/boolean/test_function.py +++ b/pandas/tests/arrays/boolean/test_function.py @@ -77,11 +77,11 @@ def test_ufunc_reduce_raises(values): def test_value_counts_na(): arr = pd.array([True, False, pd.NA], dtype="boolean") result = arr.value_counts(dropna=False) - expected = pd.Series([1, 1, 1], index=[True, False, pd.NA], dtype="Int64") + expected = pd.Series([1, 1, 1], index=[False, True, pd.NA], dtype="Int64") tm.assert_series_equal(result, expected) result = arr.value_counts(dropna=True) - expected = pd.Series([1, 1], index=[True, False], dtype="Int64") + expected = pd.Series([1, 1], index=[False, True], dtype="Int64") tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 56a8e21edd004..089bbcf4e0e3f 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -301,7 +301,7 @@ def test_arrow_roundtrip(): def test_value_counts_na(): arr = pd.array(["a", "b", "a", pd.NA], dtype="string") result = arr.value_counts(dropna=False) - expected = pd.Series([2, 1, 1], index=["a", "b", pd.NA], dtype="Int64") + expected = pd.Series([2, 1, 1], index=["a", pd.NA, "b"], dtype="Int64") tm.assert_series_equal(result, expected) result = arr.value_counts(dropna=True) diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index def7e41e22fb1..1a6cba1ace35f 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -153,16 +153,16 @@ def test_value_counts_bins(index_or_series): # these return the same res4 = s1.value_counts(bins=4, dropna=True) intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) - exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2])) tm.assert_series_equal(res4, exp4) res4 = s1.value_counts(bins=4, dropna=False) intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) - exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2])) tm.assert_series_equal(res4, exp4) res4n = s1.value_counts(bins=4, normalize=True) - exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2])) + exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2])) tm.assert_series_equal(res4n, exp4n) # handle NA's properly @@ -239,7 +239,11 @@ def test_value_counts_datetime64(index_or_series): tm.assert_series_equal(result, expected_s) result = s.value_counts(dropna=False) - expected_s[pd.NaT] = 1 + # GH 35922. 
NaN-like now sorts to the beginning of duplicate counts + idx = pd.to_datetime( + ["2010-01-01 00:00:00", "2008-09-09 00:00:00", pd.NaT, "2009-01-01 00:00:00"] + ) + expected_s = Series([3, 2, 1, 1], index=idx) tm.assert_series_equal(result, expected_s) unique = s.unique() diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 23e20a2c0903a..e973b1247941f 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -125,7 +125,11 @@ def test_sort_values(self, data_for_sorting, ascending, sort_by_key): result = ser.sort_values(ascending=ascending, key=sort_by_key) expected = ser.iloc[[2, 0, 1]] if not ascending: - expected = expected[::-1] + # GH 35922. Expect stable sort + if ser.nunique() == 2: + expected = ser.iloc[[0, 1, 2]] + else: + expected = ser.iloc[[1, 0, 2]] self.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index a90781cf43c16..f77b7cd4a6c3b 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -56,7 +56,7 @@ def test_describe_bool_frame(self): ) result = df.describe() expected = DataFrame( - {"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]}, + {"bool_data_1": [4, 2, False, 2], "bool_data_2": [4, 2, True, 3]}, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) @@ -79,7 +79,7 @@ def test_describe_bool_frame(self): ) result = df.describe() expected = DataFrame( - {"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]}, + {"bool_data": [4, 2, False, 2], "str_data": [4, 3, "a", 2]}, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_value_counts.py b/pandas/tests/frame/methods/test_value_counts.py index c409b0bbe6fa9..23f9ebdb4479d 100644 --- a/pandas/tests/frame/methods/test_value_counts.py +++ b/pandas/tests/frame/methods/test_value_counts.py @@ -48,7 +48,7 @@ def test_data_frame_value_counts_default(): expected = pd.Series( data=[2, 1, 1], index=pd.MultiIndex.from_arrays( - [(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"] + [(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"] ), ) @@ -65,7 +65,7 @@ def test_data_frame_value_counts_normalize(): expected = pd.Series( data=[0.5, 0.25, 0.25], index=pd.MultiIndex.from_arrays( - [(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"] + [(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"] ), ) @@ -78,7 +78,7 @@ def test_data_frame_value_counts_single_col_default(): result = df.value_counts() expected = pd.Series( data=[2, 1, 1], - index=pd.MultiIndex.from_arrays([[4, 6, 2]], names=["num_legs"]), + index=pd.MultiIndex.from_arrays([[4, 2, 6]], names=["num_legs"]), ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 9cf0d2035fa67..1d64fde103e9e 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -231,7 +231,7 @@ def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture) index = DatetimeIndex(index_dates, tz=tz, name="idx") expected = DatetimeIndex(expected_dates, tz=tz, name="idx") - ordered = index.sort_values() + ordered = index.sort_values(na_position="first") tm.assert_index_equal(ordered, expected) assert ordered.freq is None @@ -239,7 +239,7 @@ def test_order_without_freq(self, index_dates, 
expected_dates, tz_naive_fixture) tm.assert_index_equal(ordered, expected[::-1]) assert ordered.freq is None - ordered, indexer = index.sort_values(return_indexer=True) + ordered, indexer = index.sort_values(return_indexer=True, na_position="first") tm.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) @@ -249,7 +249,7 @@ def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture) ordered, indexer = index.sort_values(return_indexer=True, ascending=False) tm.assert_index_equal(ordered, expected[::-1]) - exp = np.array([2, 1, 3, 4, 0]) + exp = np.array([2, 1, 3, 0, 4]) tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq is None diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 74ca6ec59736b..10134b20e7d3e 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -178,7 +178,7 @@ def _check_freq(index, expected_index): pidx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D") - result = pidx.sort_values() + result = pidx.sort_values(na_position="first") expected = PeriodIndex(["NaT", "2011", "2011", "2013"], name="pidx", freq="D") tm.assert_index_equal(result, expected) assert result.freq == "D" @@ -247,7 +247,7 @@ def test_order(self): ) for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]: - ordered = idx.sort_values() + ordered = idx.sort_values(na_position="first") tm.assert_index_equal(ordered, expected) assert ordered.freq == "D" @@ -255,7 +255,7 @@ def test_order(self): tm.assert_index_equal(ordered, expected[::-1]) assert ordered.freq == "D" - ordered, indexer = idx.sort_values(return_indexer=True) + ordered, indexer = idx.sort_values(return_indexer=True, na_position="first") tm.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) @@ -265,7 +265,7 @@ def test_order(self): ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) tm.assert_index_equal(ordered, expected[::-1]) - exp = np.array([2, 1, 3, 4, 0]) + exp = np.array([2, 1, 3, 0, 4]) tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq == "D" @@ -332,12 +332,8 @@ def test_freq_setter_deprecated(self): idx.freq = pd.offsets.Day() -@pytest.mark.xfail(reason="Datetime-like sort_values currently unstable (GH 35922)") def test_order_stability_compat(): - # GH 35584. The new implementation of sort_values for Index.sort_values - # is stable when sorting in descending order. Datetime-like sort_values - # currently aren't stable. xfail should be removed after - # the implementations' behavior is synchronized (xref GH 35922) + # GH 35922. 
sort_values is stable both for normal and datetime-like Index pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A") iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx") ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 6a681ede8ff42..d47582566fe94 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -13,14 +13,7 @@ from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd -from pandas import ( - CategoricalIndex, - DatetimeIndex, - MultiIndex, - PeriodIndex, - RangeIndex, - TimedeltaIndex, -) +from pandas import CategoricalIndex, MultiIndex, RangeIndex import pandas._testing as tm @@ -498,12 +491,7 @@ def test_ravel_deprecation(self, index): @pytest.mark.parametrize("na_position", [None, "middle"]) def test_sort_values_invalid_na_position(index_with_missing, na_position): - if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): - # datetime-like indices will get na_position kwarg as part of - # synchronizing duplicate-sorting behavior, because we currently expect - # them, other indices, and Series to sort differently (xref 35922) - pytest.xfail("sort_values does not support na_position kwarg") - elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): + if isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") if na_position not in ["first", "last"]: @@ -516,12 +504,7 @@ def test_sort_values_with_missing(index_with_missing, na_position): # GH 35584. Test that sort_values works with missing values, # sort non-missing and place missing according to na_position - if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): - # datetime-like indices will get na_position kwarg as part of - # synchronizing duplicate-sorting behavior, because we currently expect - # them, other indices, and Series to sort differently (xref 35922) - pytest.xfail("sort_values does not support na_position kwarg") - elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): + if isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") missing_count = np.sum(index_with_missing.isna()) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index b74160e7e0635..15b94eafe2f27 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -134,7 +134,7 @@ def test_order(self): ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) tm.assert_index_equal(ordered, expected[::-1]) - exp = np.array([2, 1, 3, 4, 0]) + exp = np.array([2, 1, 3, 0, 4]) tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq is None diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py index e4fdfb2838b70..f22b1be672190 100644 --- a/pandas/tests/series/methods/test_value_counts.py +++ b/pandas/tests/series/methods/test_value_counts.py @@ -185,7 +185,7 @@ def test_value_counts_categorical_with_nan(self): ( Series([False, True, True, pd.NA]), False, - Series([2, 1, 1], index=[True, False, pd.NA]), + Series([2, 1, 1], index=[True, pd.NA, False]), ), ( Series([False, True, True, pd.NA]), @@ -195,7 +195,7 @@ 
def test_value_counts_categorical_with_nan(self): ( Series(range(3), index=[True, False, np.nan]).index, False, - Series([1, 1, 1], index=[True, False, pd.NA]), + Series([1, 1, 1], index=[pd.NA, False, True]), ), ], ) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index aefbcee76b5d7..88286448de900 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1178,7 +1178,7 @@ def test_dropna(self): ) tm.assert_series_equal( Series([True, True, False, None]).value_counts(dropna=False), - Series([2, 1, 1], index=[True, False, np.nan]), + Series([2, 1, 1], index=[True, np.nan, False]), ) tm.assert_series_equal( Series([10.3, 5.0, 5.0]).value_counts(dropna=True), @@ -1197,7 +1197,7 @@ def test_dropna(self): # 32-bit linux has a different ordering if IS64: result = Series([10.3, 5.0, 5.0, None]).value_counts(dropna=False) - expected = Series([2, 1, 1], index=[5.0, 10.3, np.nan]) + expected = Series([2, 1, 1], index=[5.0, np.nan, 10.3]) tm.assert_series_equal(result, expected) def test_value_counts_normalized(self): @@ -1208,12 +1208,12 @@ def test_value_counts_normalized(self): s_typed = s.astype(t) result = s_typed.value_counts(normalize=True, dropna=False) expected = Series( - [0.6, 0.2, 0.2], index=Series([np.nan, 2.0, 1.0], dtype=t) + [0.6, 0.2, 0.2], index=Series([np.nan, 1.0, 2.0], dtype=t) ) tm.assert_series_equal(result, expected) result = s_typed.value_counts(normalize=True, dropna=True) - expected = Series([0.5, 0.5], index=Series([2.0, 1.0], dtype=t)) + expected = Series([0.5, 0.5], index=Series([1.0, 2.0], dtype=t)) tm.assert_series_equal(result, expected) def test_value_counts_uint64(self): @@ -1224,7 +1224,7 @@ def test_value_counts_uint64(self): tm.assert_series_equal(result, expected) arr = np.array([-1, 2 ** 63], dtype=object) - expected = Series([1, 1], index=[-1, 2 ** 63]) + expected = Series([1, 1], index=[2 ** 63, -1]) result = algos.value_counts(arr) # 32-bit linux has a different ordering
- [X] closes #35922 - [X] 18 tests changed / 18 passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry ## Problem Our `sort_values` functions currently behave differently for different objects: for most `Index` subclasses they are stable when sorting in descending order (this was introduced by #35604), but for DateTime-like `Index` subclasses and `Series` they are unstable. This isn't good, as sorting should be stable across the board. ## Details I came across this one while introducing missing-value support to `Index.sort_values` in #35604, so I had to limit that PR to non-DateTime-like `Index` subclasses. The problem was that we had different expectations for sorting stability baked into our test suite, so unifying the sorting algorithms and missing-value support needed a bunch of careful test changes and altering both `sort_values` and the algorithms in `sorting.py`. Since this PR necessarily includes changes in several places, I have commented on all the changes made in the code and the unusual changes in the tests to make reviewing easier (see "On test changes" below). ## On test changes Most of the changes I made in the tests are for cases where we were expecting an unstable sort, or expected NaNs to be sorted to the beginning of a list of duplicates for an ascending sort and to the end for a descending one (we forced this by inserting NaN-likes at position 0 and reversing when sorting in descending order in `Series.sort_values`). ## Default behavior changes Since DateTime-like `Index` subclasses now support `na_position` using the same implementation as the other `Index` subclasses, they now sort missing values to the end of the `Index` by default. ## Performance Ran the full benchmark suite, and there are no performance regressions. ## Out-of-scope The only type I didn't touch so far is MultiIndex. It can't be sorted the same way through nargsort, and I don't think we should be doing it in this PR, if at all (stabilizing descending-order MultiIndex.sort_values will definitely be a pain, and it's a very narrow use case, in my opinion).
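For reviewers, a minimal sketch of the post-patch behavior (the values and labels below are made up, not taken from the test suite):

```python
import pandas as pd

ser = pd.Series([2.0, 1.0, 2.0, None], index=["a", "b", "c", "d"])

# Stable descending sort: the tied 2.0 values keep their original
# relative order ("a" before "c") instead of being reversed.
print(ser.sort_values(ascending=False))
# a    2.0
# c    2.0
# b    1.0
# d    NaN
# dtype: float64

# na_position is now honored uniformly, including by DateTime-like Indexes:
idx = pd.DatetimeIndex(["2011-01-01", pd.NaT, "2011-01-02"])
print(idx.sort_values(na_position="first"))  # NaT first, then the sorted dates
```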
https://api.github.com/repos/pandas-dev/pandas/pulls/37310
2020-10-21T12:52:49Z
2020-10-31T14:49:14Z
2020-10-31T14:49:14Z
2020-12-23T17:48:20Z
Backport PR #37304 on branch 1.1.x (TST: correct parquet test expected partition column dtype for pyarrow 2.0)
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 306b2a7849586..6df13278fcb75 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -563,16 +563,20 @@ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): # read_table uses the new Arrow Datasets API since pyarrow 1.0.0 # Previous behaviour was pyarrow partitioned columns become 'category' dtypes # These are added to back of dataframe on read. In new API category dtype is - # only used if partition field is string. - legacy_read_table = LooseVersion(pyarrow.__version__) < LooseVersion("1.0.0") - if partition_col and legacy_read_table: - partition_col_type = "category" - else: - partition_col_type = "int32" - - expected_df[partition_col] = expected_df[partition_col].astype( - partition_col_type + # only used if partition field is string, but this changed again to use + # category dtype for all types (not only strings) in pyarrow 2.0.0 + pa10 = (LooseVersion(pyarrow.__version__) >= LooseVersion("1.0.0")) and ( + LooseVersion(pyarrow.__version__) < LooseVersion("2.0.0") ) + if partition_col: + if pa10: + partition_col_type = "int32" + else: + partition_col_type = "category" + + expected_df[partition_col] = expected_df[partition_col].astype( + partition_col_type + ) check_round_trip( df_compat,
Backport PR #37304: TST: correct parquet test expected partition column dtype for pyarrow 2.0
https://api.github.com/repos/pandas-dev/pandas/pulls/37308
2020-10-21T12:13:53Z
2020-10-21T13:20:26Z
2020-10-21T13:20:26Z
2020-10-21T13:20:26Z
Backport PR #37270 on branch 1.1.x: REGR: Make comparisons consistent for PeriodDtype
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index 7dd660374a6fc..8a9281ba7de99 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -20,6 +20,7 @@ Fixed regressions - Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`) - Fixed regression in :meth:`Series.astype` converting ``None`` to ``"nan"`` when casting to string (:issue:`36904`) - Fixed regression in :class:`RollingGroupby` causing a segmentation fault with Index of dtype object (:issue:`36727`) +- Fixed regression in :class:`PeriodDtype` comparing both equal and unequal to its string representation (:issue:`37265`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8350e136417b1..404f0b42f0e33 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -894,6 +894,9 @@ def __eq__(self, other: Any) -> bool: return isinstance(other, PeriodDtype) and self.freq == other.freq + def __ne__(self, other: Any) -> bool: + return not self.__eq__(other) + def __setstate__(self, state): # for pickle compat. __getstate__ is defined in the # PandasExtensionDtype superclass and uses the public properties to diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index a58dc5e5ec74a..f6cd500f911b2 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -991,3 +991,10 @@ def test_is_dtype_no_warning(check): with tm.assert_produces_warning(None): check(data["A"]) + + +def test_period_dtype_compare_to_string(): + # https://github.com/pandas-dev/pandas/issues/37265 + dtype = PeriodDtype(freq="M") + assert (dtype == "period[M]") is True + assert (dtype != "period[M]") is False
Backport PR #37270 on branch 1.1.x
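The regression being backported fits in two lines; this mirrors the test added in the diff:

```python
from pandas import PeriodDtype

dtype = PeriodDtype(freq="M")
assert (dtype == "period[M]") is True
assert (dtype != "period[M]") is False  # before the fix, this was also True
```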
https://api.github.com/repos/pandas-dev/pandas/pulls/37307
2020-10-21T11:35:52Z
2020-10-21T12:47:01Z
2020-10-21T12:47:01Z
2020-10-21T12:47:08Z
Backport PR #37198 on branch 1.1.x (BUG: Regression in Resample.apply raised error when apply affected only a Series)
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index ee76fe9398002..3e4e6f530c7a7 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -20,6 +20,7 @@ Fixed regressions - Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`) - Fixed regression in :meth:`Series.astype` converting ``None`` to ``"nan"`` when casting to string (:issue:`36904`) - Fixed regression in :class:`RollingGroupby` causing a segmentation fault with Index of dtype object (:issue:`36727`) +- Fixed regression in :meth:`DataFrame.resample(...).apply(...)` raised ``AttributeError`` when input was a :class:`DataFrame` and only a :class:`Series` was evaluated (:issue:`36951`) - Fixed regression in :class:`PeriodDtype` comparing both equal and unequal to its string representation (:issue:`37265`) - Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`) - Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index bfdfc65723433..0dfbf96947c33 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -365,8 +365,9 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: result = grouped.aggregate(how, *args, **kwargs) - except DataError: + except (DataError, AttributeError, KeyError): # we have a non-reducing function; try to evaluate + # alternatively we want to evaluate only a column of the input result = grouped.apply(how, *args, **kwargs) except ValueError as err: if "Must produce aggregated value" in str(err): diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index b36b11582c1ec..a7232dd5f8a1e 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -346,3 +346,18 @@ def test_median_duplicate_columns(): result = df.resample("5s").median() expected.columns = result.columns tm.assert_frame_equal(result, expected) + + +def test_apply_to_one_column_of_df(): + # GH: 36951 + df = pd.DataFrame( + {"col": range(10), "col1": range(10, 20)}, + index=pd.date_range("2012-01-01", periods=10, freq="20min"), + ) + result = df.resample("H").apply(lambda group: group.col.sum()) + expected = pd.Series( + [3, 12, 21, 9], index=pd.date_range("2012-01-01", periods=4, freq="H") + ) + tm.assert_series_equal(result, expected) + result = df.resample("H").apply(lambda group: group["col"].sum()) + tm.assert_series_equal(result, expected)
Backport PR #37198: BUG: Regression in Resample.apply raised error when apply affected only a Series
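A minimal reproduction of the regression, mirroring the test added in the diff:

```python
import pandas as pd

df = pd.DataFrame(
    {"col": range(10), "col1": range(10, 20)},
    index=pd.date_range("2012-01-01", periods=10, freq="20min"),
)

# On 1.1.x this raised before the fix, because the applied function
# evaluates only a single column of the input DataFrame; it now
# returns the hourly sums as a Series.
result = df.resample("H").apply(lambda group: group["col"].sum())
print(result)
```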
https://api.github.com/repos/pandas-dev/pandas/pulls/37305
2020-10-21T10:46:47Z
2020-10-26T18:21:27Z
2020-10-26T18:21:27Z
2020-10-26T18:21:27Z
TST: correct parquet test expected partition column dtype for pyarrow 2.0
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 002271ead1e38..8d3d4cc347019 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -614,16 +614,20 @@ def test_s3_roundtrip_for_dir( # read_table uses the new Arrow Datasets API since pyarrow 1.0.0 # Previous behaviour was pyarrow partitioned columns become 'category' dtypes # These are added to back of dataframe on read. In new API category dtype is - # only used if partition field is string. - legacy_read_table = LooseVersion(pyarrow.__version__) < LooseVersion("1.0.0") - if partition_col and legacy_read_table: - partition_col_type = "category" - else: - partition_col_type = "int32" - - expected_df[partition_col] = expected_df[partition_col].astype( - partition_col_type + # only used if partition field is string, but this changed again to use + # category dtype for all types (not only strings) in pyarrow 2.0.0 + pa10 = (LooseVersion(pyarrow.__version__) >= LooseVersion("1.0.0")) and ( + LooseVersion(pyarrow.__version__) < LooseVersion("2.0.0") ) + if partition_col: + if pa10: + partition_col_type = "int32" + else: + partition_col_type = "category" + + expected_df[partition_col] = expected_df[partition_col].astype( + partition_col_type + ) check_round_trip( df_compat,
Follow-up on https://github.com/pandas-dev/pandas/pull/35814, but now incorporating the changes in pyarrow 2.0. cc @alimcmaster1 Also related to https://github.com/pandas-dev/pandas/issues/37286 and https://github.com/pandas-dev/pandas/pull/37296/ (timezone-related failures), but doing this fix separately, as it's not something that needs to be reverted.
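For context, a rough sketch of the behavior being pinned down (the path and data are illustrative, not from the test): the dtype of a partition column read back from a partitioned dataset depends on the pyarrow version.

```python
import pandas as pd

df = pd.DataFrame({"part": [0, 0, 1], "val": [1.0, 2.0, 3.0]})
df.to_parquet("partitioned_dir", partition_cols=["part"])  # hypothetical path

result = pd.read_parquet("partitioned_dir")
# pyarrow < 1.0  -> "part" is read back as category
# pyarrow 1.x    -> int32 (category only used for string partition fields)
# pyarrow >= 2.0 -> category again, for all partition key types
print(result["part"].dtype)
```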
https://api.github.com/repos/pandas-dev/pandas/pulls/37304
2020-10-21T10:10:07Z
2020-10-21T12:06:58Z
2020-10-21T12:06:57Z
2020-10-21T12:25:05Z
CI: temporary skip parquet tz test for pyarrow>=2.0.0
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 67ee9348394dd..002271ead1e38 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -765,6 +765,10 @@ def test_timestamp_nanoseconds(self, pa): check_round_trip(df, pa, write_kwargs={"version": "2.0"}) def test_timezone_aware_index(self, pa, timezone_aware_date_list): + if LooseVersion(pyarrow.__version__) >= LooseVersion("2.0.0"): + # temporary skip this test until it is properly resolved + # https://github.com/pandas-dev/pandas/issues/37286 + pytest.skip() idx = 5 * [timezone_aware_date_list] df = pd.DataFrame(index=idx, data={"index_as_col": idx})
See https://github.com/pandas-dev/pandas/issues/37286; this PR is not the actual fix, but it at least ensures we don't have failing CI in other PRs. (I prefer skipping the test to pinning pyarrow to <= 1.0, since all the other tests pass on 2.0, and it's good to keep running them.)
https://api.github.com/repos/pandas-dev/pandas/pulls/37303
2020-10-21T08:55:05Z
2020-10-21T10:49:56Z
2020-10-21T10:49:56Z
2020-10-21T12:07:28Z
BUG: Allow empty chunksize in stata reader when using iterator
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index 043b817bb9026..eb68ca38ea5b6 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -23,6 +23,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.resample(...).apply(...)` raised ``AttributeError`` when input was a :class:`DataFrame` and only a :class:`Series` was evaluated (:issue:`36951`) - Fixed regression in :class:`PeriodDtype` comparing both equal and unequal to its string representation (:issue:`37265`) - Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`) +- Fixed regression in :class:`StataReader` which required ``chunksize`` to be manually set when using an iterator to read a dataset (:issue:`37280`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/stata.py b/pandas/io/stata.py index c128c56f496cc..cec73ceb17f09 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -469,7 +469,7 @@ class PossiblePrecisionLoss(Warning): precision_loss_doc = """ -Column converted from %s to %s, and some data are outside of the lossless +Column converted from {0} to {1}, and some data are outside of the lossless conversion range. This may result in a loss of precision in the saved data. """ @@ -543,7 +543,7 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: object in a DataFrame. """ ws = "" - # original, if small, if large + # original, if small, if large conversion_data = ( (np.bool_, np.int8, np.int8), (np.uint8, np.int8, np.int16), @@ -563,7 +563,7 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: dtype = c_data[1] else: dtype = c_data[2] - if c_data[2] == np.float64: # Warn if necessary + if c_data[2] == np.int64: # Warn if necessary if data[col].max() >= 2 ** 53: ws = precision_loss_doc.format("uint64", "float64") @@ -627,12 +627,12 @@ def __init__(self, catarray: Series, encoding: str = "latin-1"): self.value_labels = list(zip(np.arange(len(categories)), categories)) self.value_labels.sort(key=lambda x: x[0]) self.text_len = 0 - self.off: List[int] = [] - self.val: List[int] = [] self.txt: List[bytes] = [] self.n = 0 # Compute lengths and setup lists of offsets and labels + offsets: List[int] = [] + values: List[int] = [] for vl in self.value_labels: category = vl[1] if not isinstance(category, str): @@ -642,9 +642,9 @@ def __init__(self, catarray: Series, encoding: str = "latin-1"): ValueLabelTypeMismatch, ) category = category.encode(encoding) - self.off.append(self.text_len) + offsets.append(self.text_len) self.text_len += len(category) + 1 # +1 for the padding - self.val.append(vl[0]) + values.append(vl[0]) self.txt.append(category) self.n += 1 @@ -655,8 +655,8 @@ def __init__(self, catarray: Series, encoding: str = "latin-1"): ) # Ensure int32 - self.off = np.array(self.off, dtype=np.int32) - self.val = np.array(self.val, dtype=np.int32) + self.off = np.array(offsets, dtype=np.int32) + self.val = np.array(values, dtype=np.int32) # Total length self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len @@ -868,23 +868,23 @@ def __init__(self): # with a label, but the underlying variable is -127 to 100 # we're going to drop the label and cast to int self.DTYPE_MAP = dict( - list(zip(range(1, 245), ["a" + str(i) for i in range(1, 245)])) + list(zip(range(1, 245), [np.dtype("a" + str(i)) for i in range(1, 245)])) + [ - (251, np.int8), - (252, np.int16), - (253, np.int32), - (254, np.float32), - (255, 
np.float64), + (251, np.dtype(np.int8)), + (252, np.dtype(np.int16)), + (253, np.dtype(np.int32)), + (254, np.dtype(np.float32)), + (255, np.dtype(np.float64)), ] ) self.DTYPE_MAP_XML = dict( [ - (32768, np.uint8), # Keys to GSO - (65526, np.float64), - (65527, np.float32), - (65528, np.int32), - (65529, np.int16), - (65530, np.int8), + (32768, np.dtype(np.uint8)), # Keys to GSO + (65526, np.dtype(np.float64)), + (65527, np.dtype(np.float32)), + (65528, np.dtype(np.int32)), + (65529, np.dtype(np.int16)), + (65530, np.dtype(np.int8)), ] ) # error: Argument 1 to "list" has incompatible type "str"; @@ -1045,9 +1045,10 @@ def __init__( self._order_categoricals = order_categoricals self._encoding = "" self._chunksize = chunksize - if self._chunksize is not None and ( - not isinstance(chunksize, int) or chunksize <= 0 - ): + self._using_iterator = False + if self._chunksize is None: + self._chunksize = 1 + elif not isinstance(chunksize, int) or chunksize <= 0: raise ValueError("chunksize must be a positive integer when set.") # State variables for the file @@ -1057,7 +1058,7 @@ def __init__( self._column_selector_set = False self._value_labels_read = False self._data_read = False - self._dtype = None + self._dtype: Optional[np.dtype] = None self._lines_read = 0 self._native_byteorder = _set_endianness(sys.byteorder) @@ -1193,7 +1194,7 @@ def _read_new_header(self) -> None: # Get data type information, works for versions 117-119. def _get_dtypes( self, seek_vartypes: int - ) -> Tuple[List[Union[int, str]], List[Union[int, np.dtype]]]: + ) -> Tuple[List[Union[int, str]], List[Union[str, np.dtype]]]: self.path_or_buf.seek(seek_vartypes) raw_typlist = [ @@ -1518,11 +1519,8 @@ def _read_strls(self) -> None: self.GSO[str(v_o)] = decoded_va def __next__(self) -> DataFrame: - if self._chunksize is None: - raise ValueError( - "chunksize must be set to a positive integer to use as an iterator." - ) - return self.read(nrows=self._chunksize or 1) + self._using_iterator = True + return self.read(nrows=self._chunksize) def get_chunk(self, size: Optional[int] = None) -> DataFrame: """ @@ -1690,11 +1688,15 @@ def any_startswith(x: str) -> bool: convert = False for col in data: dtype = data[col].dtype - if dtype in (np.float16, np.float32): - dtype = np.float64 + if dtype in (np.dtype(np.float16), np.dtype(np.float32)): + dtype = np.dtype(np.float64) convert = True - elif dtype in (np.int8, np.int16, np.int32): - dtype = np.int64 + elif dtype in ( + np.dtype(np.int8), + np.dtype(np.int16), + np.dtype(np.int32), + ): + dtype = np.dtype(np.int64) convert = True retyped_data.append((col, data[col].astype(dtype))) if convert: @@ -1806,14 +1808,14 @@ def _do_convert_categoricals( keys = np.array(list(vl.keys())) column = data[col] key_matches = column.isin(keys) - if self._chunksize is not None and key_matches.all(): - initial_categories = keys + if self._using_iterator and key_matches.all(): + initial_categories: Optional[np.ndarray] = keys # If all categories are in the keys and we are iterating, # use the same keys for all chunks. If some are missing # value labels, then we will fall back to the categories # varying across chunks. 
else: - if self._chunksize is not None: + if self._using_iterator: # warn is using an iterator warnings.warn( categorical_conversion_warning, CategoricalConversionWarning @@ -2024,7 +2026,7 @@ def _convert_datetime_to_stata_type(fmt: str) -> np.dtype: "ty", "%ty", ]: - return np.float64 # Stata expects doubles for SIFs + return np.dtype(np.float64) # Stata expects doubles for SIFs else: raise NotImplementedError(f"Format {fmt} not implemented") diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index d5c2ac755ee4d..fecffd75f9478 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -1966,9 +1966,6 @@ def test_iterator_errors(dirpath): StataReader(dta_file, chunksize=0) with pytest.raises(ValueError, match="chunksize must be a positive"): StataReader(dta_file, chunksize="apple") - with pytest.raises(ValueError, match="chunksize must be set to a positive"): - with StataReader(dta_file) as reader: - reader.__next__() def test_iterator_value_labels(): @@ -1983,3 +1980,20 @@ def test_iterator_value_labels(): for i in range(2): tm.assert_index_equal(chunk.dtypes[i].categories, expected) tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100]) + + +def test_precision_loss(): + df = DataFrame( + [[sum(2 ** i for i in range(60)), sum(2 ** i for i in range(52))]], + columns=["big", "little"], + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning( + PossiblePrecisionLoss, match="Column converted from int64 to float64" + ): + df.to_stata(path, write_index=False) + reread = read_stata(path) + expected_dt = Series([np.float64, np.float64], index=["big", "little"]) + tm.assert_series_equal(reread.dtypes, expected_dt) + assert reread.loc[0, "little"] == df.loc[0, "little"] + assert reread.loc[0, "big"] == float(df.loc[0, "big"])
- [X] closes #37280 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
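As a usage sketch of the fixed behavior (the file name is hypothetical): a `StataReader` can now be consumed as an iterator without an explicit `chunksize`, defaulting to one row per chunk.

```python
from pandas.io.stata import StataReader

# Before this fix, advancing a reader created without chunksize raised
# ValueError("chunksize must be set to a positive integer ...").
with StataReader("example.dta") as reader:  # hypothetical file
    first_row = next(reader)  # now returns a 1-row DataFrame
    print(first_row)
```

The diff also corrects `_cast_to_stata_types` so the precision-loss warning actually fires when `uint64` values are converted to `float64`, as exercised by the new `test_precision_loss`.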
https://api.github.com/repos/pandas-dev/pandas/pulls/37302
2020-10-21T08:15:17Z
2020-10-23T12:15:12Z
2020-10-23T12:15:12Z
2020-10-23T12:35:48Z
TST: collect tests by method
diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py index 13a93e3efc48c..6d4ce3fa0dd4e 100644 --- a/pandas/tests/frame/methods/test_count.py +++ b/pandas/tests/frame/methods/test_count.py @@ -1,4 +1,7 @@ -from pandas import DataFrame, Series +import numpy as np +import pytest + +from pandas import DataFrame, Index, Series import pandas._testing as tm @@ -34,3 +37,85 @@ def test_count_objects(self, float_string_frame): tm.assert_series_equal(dm.count(), df.count()) tm.assert_series_equal(dm.count(1), df.count(1)) + + def test_count_level_corner(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + ser = frame["A"][:0] + result = ser.count(level=0) + expected = Series(0, index=ser.index.levels[0], name="A") + tm.assert_series_equal(result, expected) + + df = frame[:0] + result = df.count(level=0) + expected = ( + DataFrame( + index=ser.index.levels[0].set_names(["first"]), columns=df.columns + ) + .fillna(0) + .astype(np.int64) + ) + tm.assert_frame_equal(result, expected) + + def test_count_index_with_nan(self): + # https://github.com/pandas-dev/pandas/issues/21824 + df = DataFrame( + { + "Person": ["John", "Myla", None, "John", "Myla"], + "Age": [24.0, 5, 21.0, 33, 26], + "Single": [False, True, True, True, False], + } + ) + + # count on row labels + res = df.set_index(["Person", "Single"]).count(level="Person") + expected = DataFrame( + index=Index(["John", "Myla"], name="Person"), + columns=Index(["Age"]), + data=[2, 2], + ) + tm.assert_frame_equal(res, expected) + + # count on column labels + res = df.set_index(["Person", "Single"]).T.count(level="Person", axis=1) + expected = DataFrame( + columns=Index(["John", "Myla"], name="Person"), + index=Index(["Age"]), + data=[[2, 2]], + ) + tm.assert_frame_equal(res, expected) + + def test_count_level( + self, + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, + ): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + + def _check_counts(frame, axis=0): + index = frame._get_axis(axis) + for i in range(index.nlevels): + result = frame.count(axis=axis, level=i) + expected = frame.groupby(axis=axis, level=i).count() + expected = expected.reindex_like(result).astype("i8") + tm.assert_frame_equal(result, expected) + + frame.iloc[1, [1, 2]] = np.nan + frame.iloc[7, [0, 1]] = np.nan + ymd.iloc[1, [1, 2]] = np.nan + ymd.iloc[7, [0, 1]] = np.nan + + _check_counts(frame) + _check_counts(ymd) + _check_counts(frame.T, axis=1) + _check_counts(ymd.T, axis=1) + + # can't call with level on regular DataFrame + df = tm.makeTimeDataFrame() + with pytest.raises(TypeError, match="hierarchical"): + df.count(level=0) + + frame["D"] = "foo" + result = frame.count(level=0, numeric_only=True) + tm.assert_index_equal(result.columns, Index(list("ABC"), name="exp")) diff --git a/pandas/tests/frame/methods/test_pop.py b/pandas/tests/frame/methods/test_pop.py index fccb3f10dde45..2926e29e61d56 100644 --- a/pandas/tests/frame/methods/test_pop.py +++ b/pandas/tests/frame/methods/test_pop.py @@ -1,4 +1,6 @@ -from pandas import DataFrame, Series +import numpy as np + +from pandas import DataFrame, MultiIndex, Series import pandas._testing as tm @@ -38,3 +40,28 @@ def test_pop_non_unique_cols(self): assert "b" in df.columns assert "a" not in df.columns assert len(df.index) == 2 + + def test_mixed_depth_pop(self): + arrays = [ + ["a", "top", "top", "routine1", "routine1", "routine2"], + ["", "OD", "OD", "result1", 
"result2", "result1"], + ["", "wx", "wy", "", "", ""], + ] + + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + df = DataFrame(np.random.randn(4, 6), columns=index) + + df1 = df.copy() + df2 = df.copy() + result = df1.pop("a") + expected = df2.pop(("a", "", "")) + tm.assert_series_equal(expected, result, check_names=False) + tm.assert_frame_equal(df1, df2) + assert result.name == "a" + + expected = df1["top"] + df1 = df1.drop(["top"], axis=1) + result = df2.pop("top") + tm.assert_frame_equal(expected, result) + tm.assert_frame_equal(df1, df2) diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index ebe7eabd53b46..8927ab7c5ef79 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -3,7 +3,16 @@ import numpy as np import pytest -from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series, date_range +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + date_range, + period_range, + to_datetime, +) import pandas._testing as tm @@ -352,6 +361,112 @@ def test_construction_with_categorical_index(self): idf = idf.reset_index().set_index("B") tm.assert_index_equal(idf.index, ci) + def test_set_index_datetime(self): + # GH#3950 + df = DataFrame( + { + "label": ["a", "a", "a", "b", "b", "b"], + "datetime": [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ], + "value": range(6), + } + ) + df.index = to_datetime(df.pop("datetime"), utc=True) + df.index = df.index.tz_convert("US/Pacific") + + expected = DatetimeIndex( + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], + name="datetime", + ) + expected = expected.tz_localize("UTC").tz_convert("US/Pacific") + + df = df.set_index("label", append=True) + tm.assert_index_equal(df.index.levels[0], expected) + tm.assert_index_equal(df.index.levels[1], Index(["a", "b"], name="label")) + assert df.index.names == ["datetime", "label"] + + df = df.swaplevel(0, 1) + tm.assert_index_equal(df.index.levels[0], Index(["a", "b"], name="label")) + tm.assert_index_equal(df.index.levels[1], expected) + assert df.index.names == ["label", "datetime"] + + df = DataFrame(np.random.random(6)) + idx1 = DatetimeIndex( + [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ], + tz="US/Eastern", + ) + idx2 = DatetimeIndex( + [ + "2012-04-01 09:00", + "2012-04-01 09:00", + "2012-04-01 09:00", + "2012-04-02 09:00", + "2012-04-02 09:00", + "2012-04-02 09:00", + ], + tz="US/Eastern", + ) + idx3 = date_range("2011-01-01 09:00", periods=6, tz="Asia/Tokyo") + idx3 = idx3._with_freq(None) + + df = df.set_index(idx1) + df = df.set_index(idx2, append=True) + df = df.set_index(idx3, append=True) + + expected1 = DatetimeIndex( + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], + tz="US/Eastern", + ) + expected2 = DatetimeIndex( + ["2012-04-01 09:00", "2012-04-02 09:00"], tz="US/Eastern" + ) + + tm.assert_index_equal(df.index.levels[0], expected1) + tm.assert_index_equal(df.index.levels[1], expected2) + tm.assert_index_equal(df.index.levels[2], idx3) + + # GH#7092 + tm.assert_index_equal(df.index.get_level_values(0), idx1) + tm.assert_index_equal(df.index.get_level_values(1), idx2) + tm.assert_index_equal(df.index.get_level_values(2), idx3) + + def 
test_set_index_period(self): + # GH#6631 + df = DataFrame(np.random.random(6)) + idx1 = period_range("2011-01-01", periods=3, freq="M") + idx1 = idx1.append(idx1) + idx2 = period_range("2013-01-01 09:00", periods=2, freq="H") + idx2 = idx2.append(idx2).append(idx2) + idx3 = period_range("2005", periods=6, freq="A") + + df = df.set_index(idx1) + df = df.set_index(idx2, append=True) + df = df.set_index(idx3, append=True) + + expected1 = period_range("2011-01-01", periods=3, freq="M") + expected2 = period_range("2013-01-01 09:00", periods=2, freq="H") + + tm.assert_index_equal(df.index.levels[0], expected1) + tm.assert_index_equal(df.index.levels[1], expected2) + tm.assert_index_equal(df.index.levels[2], idx3) + + tm.assert_index_equal(df.index.get_level_values(0), idx1) + tm.assert_index_equal(df.index.get_level_values(1), idx2) + tm.assert_index_equal(df.index.get_level_values(2), idx3) + class TestSetIndexInvalid: def test_set_index_verify_integrity(self, frame_of_index_cols): diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index a106702aff807..da2f90c1c4e32 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -662,6 +662,12 @@ def test_sort_index_level_mixed(self): sorted_after.drop([("foo", "three")], axis=1), ) + def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.sort_index() + assert result.index.names == frame.index.names + class TestDataFrameSortIndexKey: def test_sort_multi_index_key(self): diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 9cf5afc09e800..54e0e8d0ec8d7 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1195,6 +1195,40 @@ def test_preserve_timezone(self, initial: str, method): result = getattr(df, method)(axis=1) tm.assert_series_equal(result, expected) + def test_frame_any_all_with_level(self): + df = DataFrame( + {"data": [False, False, True, False, True, False, True]}, + index=[ + ["one", "one", "two", "one", "two", "two", "two"], + [0, 1, 0, 2, 1, 2, 3], + ], + ) + + result = df.any(level=0) + ex = DataFrame({"data": [False, True]}, index=["one", "two"]) + tm.assert_frame_equal(result, ex) + + result = df.all(level=0) + ex = DataFrame({"data": [False, False]}, index=["one", "two"]) + tm.assert_frame_equal(result, ex) + + def test_frame_any_with_timedelta(self): + # GH#17667 + df = DataFrame( + { + "a": Series([0, 0]), + "t": Series([pd.to_timedelta(0, "s"), pd.to_timedelta(1, "ms")]), + } + ) + + result = df.any(axis=0) + expected = Series(data=[False, True], index=["a", "t"]) + tm.assert_series_equal(result, expected) + + result = df.any(axis=1) + expected = Series(data=[False, True]) + tm.assert_series_equal(result, expected) + def test_mixed_frame_with_integer_sum(): # https://github.com/pandas-dev/pandas/issues/34520 diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 593d1c78a19e2..229d9de5eaad2 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -1,6 +1,7 @@ import numpy as np +import pytest -from pandas import MultiIndex, NaT, Series, date_range +from pandas import MultiIndex, NaT, Series, date_range, period_range import pandas.testing as tm @@ -26,3 +27,15 @@ def test_setitem_multiindex_empty_slice(self): expected = result.copy() result.loc[[]] = 
0 tm.assert_series_equal(result, expected) + + +class TestSetitemPeriodDtype: + @pytest.mark.parametrize("na_val", [None, np.nan]) + def test_setitem_na_period_dtype_casts_to_nat(self, na_val): + ser = Series(period_range("2000-01-01", periods=10, freq="D")) + + ser[3] = na_val + assert ser[3] is NaT + + ser[3:5] = na_val + assert ser[4] is NaT diff --git a/pandas/tests/series/methods/test_repeat.py b/pandas/tests/series/methods/test_repeat.py index b8e01e79ea02f..32f7384d34ebd 100644 --- a/pandas/tests/series/methods/test_repeat.py +++ b/pandas/tests/series/methods/test_repeat.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas import Series +from pandas import MultiIndex, Series import pandas._testing as tm @@ -28,3 +28,10 @@ def test_numpy_repeat(self): msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.repeat(ser, 2, axis=0) + + def test_repeat_with_multiindex(self): + # GH#9361, fixed by GH#7891 + m_idx = MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)]) + data = ["a", "b", "c", "d"] + m_df = Series(data, index=m_idx) + assert m_df.repeat(3).shape == (3 * len(data),) diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 52a398a00dfe5..2c32342f8802a 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -49,21 +49,6 @@ def test_NaT_cast(self): expected = Series([pd.NaT], dtype="period[D]") tm.assert_series_equal(result, expected) - def test_set_none(self): - self.series[3] = None - assert self.series[3] is pd.NaT - - self.series[3:5] = None - assert self.series[4] is pd.NaT - - def test_set_nan(self): - # Do we want to allow this? - self.series[5] = np.nan - assert self.series[5] is pd.NaT - - self.series[5:7] = np.nan - assert self.series[6] is pd.NaT - def test_intercept_astype_object(self): expected = self.series.astype("object") diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index 129dc275c4d5a..03198ec3289dd 100644 --- a/pandas/tests/test_join.py +++ b/pandas/tests/test_join.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas._libs import join as _join +from pandas._libs import join as libjoin from pandas import Categorical, DataFrame, Index, merge import pandas._testing as tm @@ -12,7 +12,7 @@ class TestIndexer: "dtype", ["int32", "int64", "float32", "float64", "object"] ) def test_outer_join_indexer(self, dtype): - indexer = _join.outer_join_indexer + indexer = libjoin.outer_join_indexer left = np.arange(3, dtype=dtype) right = np.arange(2, 5, dtype=dtype) @@ -47,7 +47,7 @@ def test_left_join_indexer_unique(): a = np.array([1, 2, 3, 4, 5], dtype=np.int64) b = np.array([2, 2, 3, 4, 4], dtype=np.int64) - result = _join.left_join_indexer_unique(b, a) + result = libjoin.left_join_indexer_unique(b, a) expected = np.array([1, 1, 2, 3, 3], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) @@ -162,7 +162,7 @@ def test_left_outer_join_bug(): right = np.array([3, 1], dtype=np.int64) max_groups = 4 - lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False) + lidx, ridx = libjoin.left_outer_join(left, right, max_groups, sort=False) exp_lidx = np.arange(len(left), dtype=np.int64) exp_ridx = -np.ones(len(left), dtype=np.int64) @@ -178,7 +178,7 @@ def test_inner_join_indexer(): a = np.array([1, 2, 3, 4, 5], dtype=np.int64) b = np.array([0, 3, 5, 7, 9], dtype=np.int64) - index, ares, bres = _join.inner_join_indexer(a, b) + index, ares, bres = libjoin.inner_join_indexer(a, b) index_exp = np.array([3, 5], 
dtype=np.int64) tm.assert_almost_equal(index, index_exp) @@ -191,7 +191,7 @@ def test_inner_join_indexer(): a = np.array([5], dtype=np.int64) b = np.array([5], dtype=np.int64) - index, ares, bres = _join.inner_join_indexer(a, b) + index, ares, bres = libjoin.inner_join_indexer(a, b) tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64)) tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64)) tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64)) @@ -201,7 +201,7 @@ def test_outer_join_indexer(): a = np.array([1, 2, 3, 4, 5], dtype=np.int64) b = np.array([0, 3, 5, 7, 9], dtype=np.int64) - index, ares, bres = _join.outer_join_indexer(a, b) + index, ares, bres = libjoin.outer_join_indexer(a, b) index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64) tm.assert_almost_equal(index, index_exp) @@ -214,7 +214,7 @@ def test_outer_join_indexer(): a = np.array([5], dtype=np.int64) b = np.array([5], dtype=np.int64) - index, ares, bres = _join.outer_join_indexer(a, b) + index, ares, bres = libjoin.outer_join_indexer(a, b) tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64)) tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64)) tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64)) @@ -224,7 +224,7 @@ def test_left_join_indexer(): a = np.array([1, 2, 3, 4, 5], dtype=np.int64) b = np.array([0, 3, 5, 7, 9], dtype=np.int64) - index, ares, bres = _join.left_join_indexer(a, b) + index, ares, bres = libjoin.left_join_indexer(a, b) tm.assert_almost_equal(index, a) @@ -236,7 +236,7 @@ def test_left_join_indexer(): a = np.array([5], dtype=np.int64) b = np.array([5], dtype=np.int64) - index, ares, bres = _join.left_join_indexer(a, b) + index, ares, bres = libjoin.left_join_indexer(a, b) tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64)) tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64)) tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64)) @@ -246,7 +246,7 @@ def test_left_join_indexer2(): idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) - res, lidx, ridx = _join.left_join_indexer(idx2.values, idx.values) + res, lidx, ridx = libjoin.left_join_indexer(idx2.values, idx.values) exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64) tm.assert_almost_equal(res, exp_res) @@ -262,7 +262,7 @@ def test_outer_join_indexer2(): idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) - res, lidx, ridx = _join.outer_join_indexer(idx2.values, idx.values) + res, lidx, ridx = libjoin.outer_join_indexer(idx2.values, idx.values) exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64) tm.assert_almost_equal(res, exp_res) @@ -278,7 +278,7 @@ def test_inner_join_indexer2(): idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) - res, lidx, ridx = _join.inner_join_indexer(idx2.values, idx.values) + res, lidx, ridx = libjoin.inner_join_indexer(idx2.values, idx.values) exp_res = np.array([1, 1, 2, 5], dtype=np.int64) tm.assert_almost_equal(res, exp_res) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 9c29d3a062dfa..c5d678f412601 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -224,69 +224,6 @@ def test_reset_index_with_drop( assert isinstance(deleveled, Series) assert deleveled.index.name == self.series.index.name - def test_count_level( - self, - multiindex_year_month_day_dataframe_random_data, - multiindex_dataframe_random_data, - ): - ymd = multiindex_year_month_day_dataframe_random_data - frame = multiindex_dataframe_random_data - - def 
_check_counts(frame, axis=0): - index = frame._get_axis(axis) - for i in range(index.nlevels): - result = frame.count(axis=axis, level=i) - expected = frame.groupby(axis=axis, level=i).count() - expected = expected.reindex_like(result).astype("i8") - tm.assert_frame_equal(result, expected) - - frame.iloc[1, [1, 2]] = np.nan - frame.iloc[7, [0, 1]] = np.nan - ymd.iloc[1, [1, 2]] = np.nan - ymd.iloc[7, [0, 1]] = np.nan - - _check_counts(frame) - _check_counts(ymd) - _check_counts(frame.T, axis=1) - _check_counts(ymd.T, axis=1) - - # can't call with level on regular DataFrame - df = tm.makeTimeDataFrame() - with pytest.raises(TypeError, match="hierarchical"): - df.count(level=0) - - frame["D"] = "foo" - result = frame.count(level=0, numeric_only=True) - tm.assert_index_equal(result.columns, Index(list("ABC"), name="exp")) - - def test_count_index_with_nan(self): - # https://github.com/pandas-dev/pandas/issues/21824 - df = DataFrame( - { - "Person": ["John", "Myla", None, "John", "Myla"], - "Age": [24.0, 5, 21.0, 33, 26], - "Single": [False, True, True, True, False], - } - ) - - # count on row labels - res = df.set_index(["Person", "Single"]).count(level="Person") - expected = DataFrame( - index=Index(["John", "Myla"], name="Person"), - columns=Index(["Age"]), - data=[2, 2], - ) - tm.assert_frame_equal(res, expected) - - # count on column labels - res = df.set_index(["Person", "Single"]).T.count(level="Person", axis=1) - expected = DataFrame( - columns=Index(["John", "Myla"], name="Person"), - index=Index(["Age"]), - data=[[2, 2]], - ) - tm.assert_frame_equal(res, expected) - def test_count_level_series(self): index = MultiIndex( levels=[["foo", "bar", "baz"], ["one", "two", "three", "four"]], @@ -307,23 +244,6 @@ def test_count_level_series(self): result.astype("f8"), expected.reindex(result.index).fillna(0) ) - def test_count_level_corner(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - s = frame["A"][:0] - result = s.count(level=0) - expected = Series(0, index=s.index.levels[0], name="A") - tm.assert_series_equal(result, expected) - - df = frame[:0] - result = df.count(level=0) - expected = ( - DataFrame(index=s.index.levels[0].set_names(["first"]), columns=df.columns) - .fillna(0) - .astype(np.int64) - ) - tm.assert_frame_equal(result, expected) - def test_get_level_number_out_of_bounds(self, multiindex_dataframe_random_data): frame = multiindex_dataframe_random_data @@ -1138,40 +1058,6 @@ def test_stat_op_corner(self): expected = Series([10.0], index=[2]) tm.assert_series_equal(result, expected) - def test_frame_any_all_group(self): - df = DataFrame( - {"data": [False, False, True, False, True, False, True]}, - index=[ - ["one", "one", "two", "one", "two", "two", "two"], - [0, 1, 0, 2, 1, 2, 3], - ], - ) - - result = df.any(level=0) - ex = DataFrame({"data": [False, True]}, index=["one", "two"]) - tm.assert_frame_equal(result, ex) - - result = df.all(level=0) - ex = DataFrame({"data": [False, False]}, index=["one", "two"]) - tm.assert_frame_equal(result, ex) - - def test_series_any_timedelta(self): - # GH 17667 - df = DataFrame( - { - "a": Series([0, 0]), - "t": Series([pd.to_timedelta(0, "s"), pd.to_timedelta(1, "ms")]), - } - ) - - result = df.any(axis=0) - expected = Series(data=[False, True], index=["a", "t"]) - tm.assert_series_equal(result, expected) - - result = df.any(axis=1) - expected = Series(data=[False, True]) - tm.assert_series_equal(result, expected) - def test_std_var_pass_ddof(self): index = MultiIndex.from_arrays( 
[np.arange(5).repeat(10), np.tile(np.arange(10), 5)] @@ -1349,31 +1235,6 @@ def test_level_with_tuples(self): tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result2, expected) - def test_mixed_depth_pop(self): - arrays = [ - ["a", "top", "top", "routine1", "routine1", "routine2"], - ["", "OD", "OD", "result1", "result2", "result1"], - ["", "wx", "wy", "", "", ""], - ] - - tuples = sorted(zip(*arrays)) - index = MultiIndex.from_tuples(tuples) - df = DataFrame(randn(4, 6), columns=index) - - df1 = df.copy() - df2 = df.copy() - result = df1.pop("a") - expected = df2.pop(("a", "", "")) - tm.assert_series_equal(expected, result, check_names=False) - tm.assert_frame_equal(df1, df2) - assert result.name == "a" - - expected = df1["top"] - df1 = df1.drop(["top"], axis=1) - result = df2.pop("top") - tm.assert_frame_equal(expected, result) - tm.assert_frame_equal(df1, df2) - def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data): frame = multiindex_dataframe_random_data @@ -1517,180 +1378,98 @@ def test_multiindex_set_index(self): # it works! df.set_index(index) - def test_set_index_datetime(self): + def test_reset_index_datetime(self, tz_naive_fixture): # GH 3950 + tz = tz_naive_fixture + idx1 = pd.date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx1") + idx2 = Index(range(5), name="idx2", dtype="int64") + idx = MultiIndex.from_arrays([idx1, idx2]) df = DataFrame( - { - "label": ["a", "a", "a", "b", "b", "b"], - "datetime": [ - "2011-07-19 07:00:00", - "2011-07-19 08:00:00", - "2011-07-19 09:00:00", - "2011-07-19 07:00:00", - "2011-07-19 08:00:00", - "2011-07-19 09:00:00", - ], - "value": range(6), - } + {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, + index=idx, ) - df.index = pd.to_datetime(df.pop("datetime"), utc=True) - df.index = df.index.tz_convert("US/Pacific") - expected = pd.DatetimeIndex( - ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], - name="datetime", + expected = DataFrame( + { + "idx1": [ + datetime.datetime(2011, 1, 1), + datetime.datetime(2011, 1, 2), + datetime.datetime(2011, 1, 3), + datetime.datetime(2011, 1, 4), + datetime.datetime(2011, 1, 5), + ], + "idx2": np.arange(5, dtype="int64"), + "a": np.arange(5, dtype="int64"), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx1", "idx2", "a", "b"], ) - expected = expected.tz_localize("UTC").tz_convert("US/Pacific") - - df = df.set_index("label", append=True) - tm.assert_index_equal(df.index.levels[0], expected) - tm.assert_index_equal(df.index.levels[1], Index(["a", "b"], name="label")) - assert df.index.names == ["datetime", "label"] + expected["idx1"] = expected["idx1"].apply(lambda d: Timestamp(d, tz=tz)) - df = df.swaplevel(0, 1) - tm.assert_index_equal(df.index.levels[0], Index(["a", "b"], name="label")) - tm.assert_index_equal(df.index.levels[1], expected) - assert df.index.names == ["label", "datetime"] + tm.assert_frame_equal(df.reset_index(), expected) - df = DataFrame(np.random.random(6)) - idx1 = pd.DatetimeIndex( - [ - "2011-07-19 07:00:00", - "2011-07-19 08:00:00", - "2011-07-19 09:00:00", - "2011-07-19 07:00:00", - "2011-07-19 08:00:00", - "2011-07-19 09:00:00", - ], - tz="US/Eastern", + idx3 = pd.date_range( + "1/1/2012", periods=5, freq="MS", tz="Europe/Paris", name="idx3" ) - idx2 = pd.DatetimeIndex( - [ - "2012-04-01 09:00", - "2012-04-01 09:00", - "2012-04-01 09:00", - "2012-04-02 09:00", - "2012-04-02 09:00", - "2012-04-02 09:00", - ], - tz="US/Eastern", + idx = MultiIndex.from_arrays([idx1, idx2, idx3]) + df = 
DataFrame( + {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, + index=idx, ) - idx3 = pd.date_range("2011-01-01 09:00", periods=6, tz="Asia/Tokyo") - idx3 = idx3._with_freq(None) - - df = df.set_index(idx1) - df = df.set_index(idx2, append=True) - df = df.set_index(idx3, append=True) - expected1 = pd.DatetimeIndex( - ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], - tz="US/Eastern", + expected = DataFrame( + { + "idx1": [ + datetime.datetime(2011, 1, 1), + datetime.datetime(2011, 1, 2), + datetime.datetime(2011, 1, 3), + datetime.datetime(2011, 1, 4), + datetime.datetime(2011, 1, 5), + ], + "idx2": np.arange(5, dtype="int64"), + "idx3": [ + datetime.datetime(2012, 1, 1), + datetime.datetime(2012, 2, 1), + datetime.datetime(2012, 3, 1), + datetime.datetime(2012, 4, 1), + datetime.datetime(2012, 5, 1), + ], + "a": np.arange(5, dtype="int64"), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx1", "idx2", "idx3", "a", "b"], ) - expected2 = pd.DatetimeIndex( - ["2012-04-01 09:00", "2012-04-02 09:00"], tz="US/Eastern" + expected["idx1"] = expected["idx1"].apply(lambda d: Timestamp(d, tz=tz)) + expected["idx3"] = expected["idx3"].apply( + lambda d: Timestamp(d, tz="Europe/Paris") ) + tm.assert_frame_equal(df.reset_index(), expected) - tm.assert_index_equal(df.index.levels[0], expected1) - tm.assert_index_equal(df.index.levels[1], expected2) - tm.assert_index_equal(df.index.levels[2], idx3) - - # GH 7092 - tm.assert_index_equal(df.index.get_level_values(0), idx1) - tm.assert_index_equal(df.index.get_level_values(1), idx2) - tm.assert_index_equal(df.index.get_level_values(2), idx3) - - def test_reset_index_datetime(self): - # GH 3950 - for tz in ["UTC", "Asia/Tokyo", "US/Eastern"]: - idx1 = pd.date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx1") - idx2 = Index(range(5), name="idx2", dtype="int64") - idx = MultiIndex.from_arrays([idx1, idx2]) - df = DataFrame( - {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, - index=idx, - ) - - expected = DataFrame( - { - "idx1": [ - datetime.datetime(2011, 1, 1), - datetime.datetime(2011, 1, 2), - datetime.datetime(2011, 1, 3), - datetime.datetime(2011, 1, 4), - datetime.datetime(2011, 1, 5), - ], - "idx2": np.arange(5, dtype="int64"), - "a": np.arange(5, dtype="int64"), - "b": ["A", "B", "C", "D", "E"], - }, - columns=["idx1", "idx2", "a", "b"], - ) - expected["idx1"] = expected["idx1"].apply(lambda d: Timestamp(d, tz=tz)) - - tm.assert_frame_equal(df.reset_index(), expected) - - idx3 = pd.date_range( - "1/1/2012", periods=5, freq="MS", tz="Europe/Paris", name="idx3" - ) - idx = MultiIndex.from_arrays([idx1, idx2, idx3]) - df = DataFrame( - {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, - index=idx, - ) - - expected = DataFrame( - { - "idx1": [ - datetime.datetime(2011, 1, 1), - datetime.datetime(2011, 1, 2), - datetime.datetime(2011, 1, 3), - datetime.datetime(2011, 1, 4), - datetime.datetime(2011, 1, 5), - ], - "idx2": np.arange(5, dtype="int64"), - "idx3": [ - datetime.datetime(2012, 1, 1), - datetime.datetime(2012, 2, 1), - datetime.datetime(2012, 3, 1), - datetime.datetime(2012, 4, 1), - datetime.datetime(2012, 5, 1), - ], - "a": np.arange(5, dtype="int64"), - "b": ["A", "B", "C", "D", "E"], - }, - columns=["idx1", "idx2", "idx3", "a", "b"], - ) - expected["idx1"] = expected["idx1"].apply(lambda d: Timestamp(d, tz=tz)) - expected["idx3"] = expected["idx3"].apply( - lambda d: Timestamp(d, tz="Europe/Paris") - ) - tm.assert_frame_equal(df.reset_index(), expected) 
- - # GH 7793 - idx = MultiIndex.from_product( - [["a", "b"], pd.date_range("20130101", periods=3, tz=tz)] - ) - df = DataFrame( - np.arange(6, dtype="int64").reshape(6, 1), columns=["a"], index=idx - ) + # GH 7793 + idx = MultiIndex.from_product( + [["a", "b"], pd.date_range("20130101", periods=3, tz=tz)] + ) + df = DataFrame( + np.arange(6, dtype="int64").reshape(6, 1), columns=["a"], index=idx + ) - expected = DataFrame( - { - "level_0": "a a a b b b".split(), - "level_1": [ - datetime.datetime(2013, 1, 1), - datetime.datetime(2013, 1, 2), - datetime.datetime(2013, 1, 3), - ] - * 2, - "a": np.arange(6, dtype="int64"), - }, - columns=["level_0", "level_1", "a"], - ) - expected["level_1"] = expected["level_1"].apply( - lambda d: Timestamp(d, freq="D", tz=tz) - ) - tm.assert_frame_equal(df.reset_index(), expected) + expected = DataFrame( + { + "level_0": "a a a b b b".split(), + "level_1": [ + datetime.datetime(2013, 1, 1), + datetime.datetime(2013, 1, 2), + datetime.datetime(2013, 1, 3), + ] + * 2, + "a": np.arange(6, dtype="int64"), + }, + columns=["level_0", "level_1", "a"], + ) + expected["level_1"] = expected["level_1"].apply( + lambda d: Timestamp(d, freq="D", tz=tz) + ) + tm.assert_frame_equal(df.reset_index(), expected) def test_reset_index_period(self): # GH 7746 @@ -1768,38 +1547,6 @@ def test_reset_index_multiindex_columns(self): result = df2.rename_axis([("c", "ii")]).reset_index(col_level=1, col_fill="C") tm.assert_frame_equal(result, expected) - def test_set_index_period(self): - # GH 6631 - df = DataFrame(np.random.random(6)) - idx1 = pd.period_range("2011-01-01", periods=3, freq="M") - idx1 = idx1.append(idx1) - idx2 = pd.period_range("2013-01-01 09:00", periods=2, freq="H") - idx2 = idx2.append(idx2).append(idx2) - idx3 = pd.period_range("2005", periods=6, freq="A") - - df = df.set_index(idx1) - df = df.set_index(idx2, append=True) - df = df.set_index(idx3, append=True) - - expected1 = pd.period_range("2011-01-01", periods=3, freq="M") - expected2 = pd.period_range("2013-01-01 09:00", periods=2, freq="H") - - tm.assert_index_equal(df.index.levels[0], expected1) - tm.assert_index_equal(df.index.levels[1], expected2) - tm.assert_index_equal(df.index.levels[2], idx3) - - tm.assert_index_equal(df.index.get_level_values(0), idx1) - tm.assert_index_equal(df.index.get_level_values(1), idx2) - tm.assert_index_equal(df.index.get_level_values(2), idx3) - - def test_repeat(self): - # GH 9361 - # fixed by # GH 7891 - m_idx = MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)]) - data = ["a", "b", "c", "d"] - m_df = Series(data, index=m_idx) - assert m_df.repeat(3).shape == (3 * len(data),) - def test_subsets_multiindex_dtype(self): # GH 20757 data = [["x", 1]] @@ -1813,18 +1560,9 @@ def test_subsets_multiindex_dtype(self): class TestSorted(Base): """ everything you wanted to test about sorting """ - def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - result = frame.sort_index() - assert result.index.names == frame.index.names - - def test_sorting_repr_8017(self): - - np.random.seed(0) - data = np.random.randn(3, 4) - - for gen, extra in [ + @pytest.mark.parametrize( + "gen,extra", + [ ([1.0, 3.0, 2.0, 5.0], 4.0), ([1, 3, 2, 5], 4), ( @@ -1837,44 +1575,50 @@ def test_sorting_repr_8017(self): Timestamp("20130104"), ), (["1one", "3one", "2one", "5one"], "4one"), - ]: - columns = MultiIndex.from_tuples([("red", i) for i in gen]) - df = DataFrame(data, index=list("def"), columns=columns) - df2 = pd.concat( - [ - 
df, - DataFrame( - "world", - index=list("def"), - columns=MultiIndex.from_tuples([("red", extra)]), - ), - ], - axis=1, - ) + ], + ) + def test_sorting_repr_8017(self, gen, extra): + + np.random.seed(0) + data = np.random.randn(3, 4) + + columns = MultiIndex.from_tuples([("red", i) for i in gen]) + df = DataFrame(data, index=list("def"), columns=columns) + df2 = pd.concat( + [ + df, + DataFrame( + "world", + index=list("def"), + columns=MultiIndex.from_tuples([("red", extra)]), + ), + ], + axis=1, + ) - # check that the repr is good - # make sure that we have a correct sparsified repr - # e.g. only 1 header of read - assert str(df2).splitlines()[0].split() == ["red"] + # check that the repr is good + # make sure that we have a correct sparsified repr + # e.g. only 1 header of read + assert str(df2).splitlines()[0].split() == ["red"] - # GH 8017 - # sorting fails after columns added + # GH 8017 + # sorting fails after columns added - # construct single-dtype then sort - result = df.copy().sort_index(axis=1) - expected = df.iloc[:, [0, 2, 1, 3]] - tm.assert_frame_equal(result, expected) + # construct single-dtype then sort + result = df.copy().sort_index(axis=1) + expected = df.iloc[:, [0, 2, 1, 3]] + tm.assert_frame_equal(result, expected) - result = df2.sort_index(axis=1) - expected = df2.iloc[:, [0, 2, 1, 4, 3]] - tm.assert_frame_equal(result, expected) + result = df2.sort_index(axis=1) + expected = df2.iloc[:, [0, 2, 1, 4, 3]] + tm.assert_frame_equal(result, expected) - # setitem then sort - result = df.copy() - result[("red", extra)] = "world" + # setitem then sort + result = df.copy() + result[("red", extra)] = "world" - result = result.sort_index(axis=1) - tm.assert_frame_equal(result, expected) + result = result.sort_index(axis=1) + tm.assert_frame_equal(result, expected) def test_sort_non_lexsorted(self): # degenerate case where we sort but don't
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37300
2020-10-21T03:42:01Z
2020-10-21T12:08:57Z
2020-10-21T12:08:57Z
2020-10-21T14:49:20Z
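The refactor above is largely mechanical: loop-bodied tests such as `test_sorting_repr_8017` become `@pytest.mark.parametrize` cases, and `test_reset_index_datetime` picks up the `tz_naive_fixture` fixture instead of an inner `for tz in [...]` loop. A minimal sketch of the parametrization pattern (hypothetical test name and assertion, not code from the PR):

```python
import pytest
import pandas as pd

# Each (gen, extra) pair becomes an independent test case with its own id,
# instead of a single test body looping over every case.
@pytest.mark.parametrize(
    "gen,extra",
    [
        ([1.0, 3.0, 2.0, 5.0], 4.0),
        ([1, 3, 2, 5], 4),
        (["1one", "3one", "2one", "5one"], "4one"),
    ],
)
def test_setitem_then_sort_index_axis1(gen, extra):
    columns = pd.MultiIndex.from_tuples([("red", i) for i in gen])
    df = pd.DataFrame([[0] * len(gen)], columns=columns)
    df[("red", extra)] = "world"  # appending a column leaves the columns unsorted
    result = df.sort_index(axis=1)
    assert result.columns.is_monotonic_increasing
```

One failing input now reports as its own test case instead of masking the cases after it.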
API: require timezone match in DatetimeArray.shift
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f1f24ab7a101b..23fe774e2fcac 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -375,6 +375,7 @@ Datetimelike - :class:`Timestamp` and :class:`DatetimeIndex` comparisons between timezone-aware and timezone-naive objects now follow the standard library ``datetime`` behavior, returning ``True``/``False`` for ``!=``/``==`` and raising for inequality comparisons (:issue:`28507`) - Bug in :meth:`DatetimeIndex.equals` and :meth:`TimedeltaIndex.equals` incorrectly considering ``int64`` indexes as equal (:issue:`36744`) - Bug in :meth:`TimedeltaIndex.sum` and :meth:`Series.sum` with ``timedelta64`` dtype on an empty index or series returning ``NaT`` instead of ``Timedelta(0)`` (:issue:`31751`) +- Bug in :meth:`DatetimeArray.shift` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37299`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 8e3b26503a61b..b988b4b804178 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -507,7 +507,7 @@ def _validate_shift_value(self, fill_value): ) fill_value = new_fill - return self._unbox(fill_value) + return self._unbox(fill_value, setitem=True) def _validate_scalar(self, value, allow_listlike: bool = False): """ diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 9245eda2a71fe..0fc110f3ef39b 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -437,6 +437,18 @@ def test_shift_value_tzawareness_mismatch(self): with pytest.raises(TypeError, match="Cannot compare"): dta.shift(1, fill_value=invalid) + def test_shift_requires_tzmatch(self): + # since filling is setitem-like, we require a matching timezone, + # not just matching tzawareness + dti = pd.date_range("2016-01-01", periods=3, tz="UTC") + dta = dti._data + + fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific") + + msg = "Timezones don't match. 'UTC' != 'US/Pacific'" + with pytest.raises(ValueError, match=msg): + dta.shift(1, fill_value=fill_value) + class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37299
2020-10-21T01:46:18Z
2020-10-31T19:18:11Z
2020-10-31T19:18:11Z
2020-10-31T20:52:33Z
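A short sketch of the behavior the change above enforces (assumes pandas 1.2+, where the fix landed; `_data` is the private `DatetimeArray` accessor that the PR's own test uses):

```python
import pandas as pd
import pytest

dti = pd.date_range("2016-01-01", periods=3, tz="UTC")
dta = dti._data  # the underlying DatetimeArray

# A fill_value in the array's own timezone is still accepted.
shifted = dta.shift(1, fill_value=pd.Timestamp("2015-12-31", tz="UTC"))
assert shifted[0] == pd.Timestamp("2015-12-31", tz="UTC")

# A tz-aware fill_value in a *different* timezone now raises, because the
# fill is validated like a setitem rather than only checked for tz-awareness.
with pytest.raises(ValueError, match="Timezones don't match"):
    dta.shift(1, fill_value=pd.Timestamp("2015-12-31", tz="US/Pacific"))
```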
CI: Add unwanted pattern check
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index ab44598e04440..f01cd9ba01470 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -205,6 +205,8 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then MSG='Check for inconsistent use of pandas namespace in tests' ; echo $MSG check_namespace "Series" RET=$(($RET + $?)) + check_namespace "DataFrame" + RET=$(($RET + $?)) echo $MSG "DONE" fi diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index cd6a430829442..5f556718ea0d3 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -722,14 +722,14 @@ def test_timedelta_ops_with_missing_values(self): sn = pd.to_timedelta(Series([pd.NaT], dtype="m8[ns]")) - df1 = pd.DataFrame(["00:00:01"]).apply(pd.to_timedelta) - df2 = pd.DataFrame(["00:00:02"]).apply(pd.to_timedelta) + df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta) + df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta) with pytest.raises(TypeError, match=msg): # Passing datetime64-dtype data to TimedeltaIndex is no longer # supported GH#29794 - pd.DataFrame([pd.NaT]).apply(pd.to_timedelta) + DataFrame([pd.NaT]).apply(pd.to_timedelta) - dfn = pd.DataFrame([pd.NaT.value]).apply(pd.to_timedelta) + dfn = DataFrame([pd.NaT.value]).apply(pd.to_timedelta) scalar1 = pd.to_timedelta("00:00:01") scalar2 = pd.to_timedelta("00:00:02") diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 2c5846872c341..5796ea52899d2 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -667,7 +667,7 @@ def test_unary_in_array(self): @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_float_comparison_bin_op(self, dtype): # GH 16363 - df = pd.DataFrame({"x": np.array([0], dtype=dtype)}) + df = DataFrame({"x": np.array([0], dtype=dtype)}) res = df.eval("x < -0.1") assert res.values == np.array([False]) @@ -734,7 +734,7 @@ def test_float_truncation(self): expected = np.float64(exp) assert result == expected - df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) + df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) cutoff = 1000000000.0006 result = df.query(f"A < {cutoff:.4f}") assert result.empty @@ -751,12 +751,12 @@ def test_float_truncation(self): def test_disallow_python_keywords(self): # GH 18221 - df = pd.DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"]) + df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"]) msg = "Python keyword not valid identifier in numexpr query" with pytest.raises(SyntaxError, match=msg): df.query("class == 0") - df = pd.DataFrame() + df = DataFrame() df.index.name = "lambda" with pytest.raises(SyntaxError, match=msg): df.query("lambda == 0") @@ -1366,7 +1366,7 @@ def assignment_not_inplace(self): def test_multi_line_expression(self): # GH 11149 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) expected = df.copy() expected["c"] = expected["a"] + expected["b"] @@ -1403,7 +1403,7 @@ def test_multi_line_expression(self): def test_multi_line_expression_not_inplace(self): # GH 11149 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) expected = df.copy() expected["c"] = expected["a"] + expected["b"] @@ -1428,7 +1428,7 @@ def test_multi_line_expression_not_inplace(self): def test_multi_line_expression_local_variable(self): # GH 15342 - df = pd.DataFrame({"a": 
[1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) expected = df.copy() local_var = 7 @@ -1446,7 +1446,7 @@ def test_multi_line_expression_local_variable(self): def test_multi_line_expression_callable_local_variable(self): # 26426 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) def local_func(a, b): return b @@ -1466,7 +1466,7 @@ def local_func(a, b): def test_multi_line_expression_callable_local_variable_with_kwargs(self): # 26426 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) def local_func(a, b): return b @@ -1486,7 +1486,7 @@ def local_func(a, b): def test_assignment_in_query(self): # GH 8664 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) df_orig = df.copy() msg = "cannot assign without a target object" with pytest.raises(ValueError, match=msg): @@ -1495,7 +1495,7 @@ def test_assignment_in_query(self): def test_query_inplace(self): # see gh-11149 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) expected = df.copy() expected = expected[expected["a"] == 2] df.query("a == 2", inplace=True) @@ -2052,7 +2052,7 @@ def test_truediv_deprecated(engine, parser): def test_negate_lt_eq_le(engine, parser): - df = pd.DataFrame([[0, 10], [1, 20]], columns=["cat", "count"]) + df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"]) expected = df[~(df.cat > 0)] result = df.query("~(cat > 0)", engine=engine, parser=parser) diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 58e91c38fc294..1d01aa48e115f 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -358,7 +358,7 @@ def test_apply_reduce_Series(self, float_frame): def test_apply_reduce_rows_to_dict(self): # GH 25196 - data = pd.DataFrame([[1, 2], [3, 4]]) + data = DataFrame([[1, 2], [3, 4]]) expected = Series([{0: 1, 1: 3}, {0: 2, 1: 4}]) result = data.apply(dict) tm.assert_series_equal(result, expected) @@ -445,7 +445,7 @@ def transform2(row): def test_apply_bug(self): # GH 6125 - positions = pd.DataFrame( + positions = DataFrame( [ [1, "ABC0", 50], [1, "YUM0", 20], @@ -619,10 +619,10 @@ def test_applymap(self, float_frame): # GH 8222 empty_frames = [ - pd.DataFrame(), - pd.DataFrame(columns=list("ABC")), - pd.DataFrame(index=list("ABC")), - pd.DataFrame({"A": [], "B": [], "C": []}), + DataFrame(), + DataFrame(columns=list("ABC")), + DataFrame(index=list("ABC")), + DataFrame({"A": [], "B": [], "C": []}), ] for frame in empty_frames: for func in [round, lambda x: x]: @@ -653,11 +653,11 @@ def func(x): return (x.hour, x.day, x.month) # it works! - pd.DataFrame(ser).applymap(func) + DataFrame(ser).applymap(func) def test_applymap_box(self): # ufunc will not be boxed. 
Same test cases as the test_map_box - df = pd.DataFrame( + df = DataFrame( { "a": [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")], "b": [ @@ -673,7 +673,7 @@ def test_applymap_box(self): ) result = df.applymap(lambda x: type(x).__name__) - expected = pd.DataFrame( + expected = DataFrame( { "a": ["Timestamp", "Timestamp"], "b": ["Timestamp", "Timestamp"], @@ -713,8 +713,8 @@ def test_apply_non_numpy_dtype(self): def test_apply_dup_names_multi_agg(self): # GH 21063 - df = pd.DataFrame([[0, 1], [2, 3]], columns=["a", "a"]) - expected = pd.DataFrame([[0, 1]], columns=["a", "a"], index=["min"]) + df = DataFrame([[0, 1], [2, 3]], columns=["a", "a"]) + expected = DataFrame([[0, 1]], columns=["a", "a"], index=["min"]) result = df.agg(["min"]) tm.assert_frame_equal(result, expected) @@ -724,7 +724,7 @@ def test_apply_nested_result_axis_1(self): def apply_list(row): return [2 * row["A"], 2 * row["C"], 2 * row["B"]] - df = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCD")) + df = DataFrame(np.zeros((4, 4)), columns=list("ABCD")) result = df.apply(apply_list, axis=1) expected = Series( [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] @@ -733,7 +733,7 @@ def apply_list(row): def test_apply_noreduction_tzaware_object(self): # https://github.com/pandas-dev/pandas/issues/31505 - df = pd.DataFrame( + df = DataFrame( {"foo": [pd.Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]" ) result = df.apply(lambda x: x) @@ -744,7 +744,7 @@ def test_apply_noreduction_tzaware_object(self): def test_apply_function_runs_once(self): # https://github.com/pandas-dev/pandas/issues/30815 - df = pd.DataFrame({"a": [1, 2, 3]}) + df = DataFrame({"a": [1, 2, 3]}) names = [] # Save row names function is applied to def reducing_function(row): @@ -763,7 +763,7 @@ def non_reducing_function(row): def test_apply_raw_function_runs_once(self): # https://github.com/pandas-dev/pandas/issues/34506 - df = pd.DataFrame({"a": [1, 2, 3]}) + df = DataFrame({"a": [1, 2, 3]}) values = [] # Save row values function is applied to def reducing_function(row): @@ -781,7 +781,7 @@ def non_reducing_function(row): def test_applymap_function_runs_once(self): - df = pd.DataFrame({"a": [1, 2, 3]}) + df = DataFrame({"a": [1, 2, 3]}) values = [] # Save values function is applied to def reducing_function(val): @@ -799,8 +799,8 @@ def non_reducing_function(val): def test_apply_with_byte_string(self): # GH 34529 - df = pd.DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"]) - expected = pd.DataFrame( + df = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"]) + expected = DataFrame( np.array([b"abcd", b"efgh"]), columns=["col"], dtype=object ) # After we make the apply we expect a dataframe just @@ -812,7 +812,7 @@ def test_apply_with_byte_string(self): def test_apply_category_equalness(self, val): # Check if categorical comparisons on apply, GH 21239 df_values = ["asd", None, 12, "asd", "cde", np.NaN] - df = pd.DataFrame({"a": df_values}, dtype="category") + df = DataFrame({"a": df_values}, dtype="category") result = df.a.apply(lambda x: x == val) expected = Series( @@ -829,7 +829,7 @@ class TestInferOutputShape: def test_infer_row_shape(self): # GH 17437 # if row shape is changing, infer it - df = pd.DataFrame(np.random.rand(10, 2)) + df = DataFrame(np.random.rand(10, 2)) result = df.apply(np.fft.fft, axis=0) assert result.shape == (10, 2) @@ -954,7 +954,7 @@ def test_infer_output_shape_listlike_columns(self): tm.assert_series_equal(result, expected) # GH 17892 - df = pd.DataFrame( + df = DataFrame( { "a": [
pd.Timestamp("2010-02-01"), @@ -1122,7 +1122,7 @@ def test_transform_and_agg_err(self, axis, float_frame): with np.errstate(all="ignore"): float_frame.agg(["max", "sqrt"], axis=axis) - df = pd.DataFrame({"A": range(5), "B": 5}) + df = DataFrame({"A": range(5), "B": 5}) def f(): with np.errstate(all="ignore"): @@ -1130,7 +1130,7 @@ def f(): def test_demo(self): # demonstration tests - df = pd.DataFrame({"A": range(5), "B": 5}) + df = DataFrame({"A": range(5), "B": 5}) result = df.agg(["min", "max"]) expected = DataFrame( @@ -1149,7 +1149,7 @@ def test_demo(self): def test_agg_with_name_as_column_name(self): # GH 36212 - Column name is "name" data = {"name": ["foo", "bar"]} - df = pd.DataFrame(data) + df = DataFrame(data) # result's name should be None result = df.agg({"name": "count"}) @@ -1163,7 +1163,7 @@ def test_agg_with_name_as_column_name(self): def test_agg_multiple_mixed_no_warning(self): # GH 20909 - mdf = pd.DataFrame( + mdf = DataFrame( { "A": [1, 2, 3], "B": [1.0, 2.0, 3.0], @@ -1171,7 +1171,7 @@ def test_agg_multiple_mixed_no_warning(self): "D": pd.date_range("20130101", periods=3), } ) - expected = pd.DataFrame( + expected = DataFrame( { "A": [1, 6], "B": [1.0, 6.0], @@ -1197,7 +1197,7 @@ def test_agg_multiple_mixed_no_warning(self): def test_agg_dict_nested_renaming_depr(self): - df = pd.DataFrame({"A": range(5), "B": 5}) + df = DataFrame({"A": range(5), "B": 5}) # nested renaming msg = r"nested renamer is not supported" @@ -1343,7 +1343,7 @@ def test_non_callable_aggregates(self): result2 = df.agg( {"A": ["count", "size"], "B": ["count", "size"], "C": ["count", "size"]} ) - expected = pd.DataFrame( + expected = DataFrame( { "A": {"count": 2, "size": 3}, "B": {"count": 2, "size": 3}, @@ -1480,7 +1480,7 @@ def test_agg_args_kwargs(self, axis, args, kwargs): def f(x, a, b, c=3): return x.sum() + (a + b) / c - df = pd.DataFrame([[1, 2], [3, 4]]) + df = DataFrame([[1, 2], [3, 4]]) if axis == 0: expected = Series([5.0, 7.0]) @@ -1514,7 +1514,7 @@ def test_apply_datetime_tz_issue(self): tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("df", [pd.DataFrame({"A": ["a", None], "B": ["c", "d"]})]) + @pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})]) @pytest.mark.parametrize("method", ["min", "max", "sum"]) def test_consistency_of_aggregates_of_columns_with_missing_values(self, df, method): # GH 16832 @@ -1528,7 +1528,7 @@ def test_consistency_of_aggregates_of_columns_with_missing_values(self, df, meth @pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan]) def test_apply_dtype(self, col): # GH 31466 - df = pd.DataFrame([[1.0, col]], columns=["a", "b"]) + df = DataFrame([[1.0, col]], columns=["a", "b"]) result = df.apply(lambda x: x.dtype) expected = df.dtypes @@ -1537,7 +1537,7 @@ def test_apply_dtype(self, col): def test_apply_mutating(): # GH#35462 case where applied func pins a new BlockManager to a row - df = pd.DataFrame({"a": range(100), "b": range(100, 200)}) + df = DataFrame({"a": range(100), "b": range(100, 200)}) def func(row): mgr = row._mgr @@ -1556,7 +1556,7 @@ def func(row): def test_apply_empty_list_reduce(): # GH#35683 get columns correct - df = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"]) + df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"]) result = df.apply(lambda x: [], result_type="reduce") expected = Series({"a": [], "b": []}, dtype=object) @@ -1565,9 +1565,9 @@ def test_apply_empty_list_reduce(): def test_apply_no_suffix_index(): # GH36189 - pdf = 
pd.DataFrame([[4, 9]] * 3, columns=["A", "B"]) + pdf = DataFrame([[4, 9]] * 3, columns=["A", "B"]) result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()]) - expected = pd.DataFrame( + expected = DataFrame( {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"] ) @@ -1576,7 +1576,7 @@ def test_apply_no_suffix_index(): def test_apply_raw_returns_string(): # https://github.com/pandas-dev/pandas/issues/35940 - df = pd.DataFrame({"A": ["aa", "bbb"]}) + df = DataFrame({"A": ["aa", "bbb"]}) result = df.apply(lambda x: x[0], axis=1, raw=True) expected = Series(["aa", "bbb"]) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index 314de5bdd8146..c876f78176e2e 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -397,7 +397,7 @@ def test_loc_indexing_preserves_index_category_dtype(self): def test_categorical_filtering(self): # GH22609 Verify filtering operations on DataFrames with categorical Series - df = pd.DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) + df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) df["b"] = df.b.astype("category") result = df.where(df.a > 0) diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 4687d94b52c80..0dee818613edb 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -75,7 +75,7 @@ def test_loc_iterable(self, float_frame, key_type): def test_loc_timedelta_0seconds(self): # GH#10583 - df = pd.DataFrame(np.random.normal(size=(10, 4))) + df = DataFrame(np.random.normal(size=(10, 4))) df.index = pd.timedelta_range(start="0s", periods=10, freq="s") expected = df.loc[pd.Timedelta("0s") :, :] result = df.loc["0s":, :] @@ -200,7 +200,7 @@ def test_setitem_list_of_tuples(self, float_frame): ( ["A", "B", "C", "D"], 7, - pd.DataFrame( + DataFrame( [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]], columns=["A", "B", "C", "D"], ), @@ -208,7 +208,7 @@ def test_setitem_list_of_tuples(self, float_frame): ( ["C", "D"], [7, 8], - pd.DataFrame( + DataFrame( [[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]], columns=["A", "B", "C", "D"], ), @@ -216,14 +216,12 @@ def test_setitem_list_of_tuples(self, float_frame): ( ["A", "B", "C"], np.array([7, 8, 9], dtype=np.int64), - pd.DataFrame( - [[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"] - ), + DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]), ), ( ["B", "C", "D"], [[7, 8, 9], [10, 11, 12], [13, 14, 15]], - pd.DataFrame( + DataFrame( [[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]], columns=["A", "B", "C", "D"], ), @@ -231,15 +229,15 @@ def test_setitem_list_of_tuples(self, float_frame): ( ["C", "A", "D"], np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64), - pd.DataFrame( + DataFrame( [[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]], columns=["A", "B", "C", "D"], ), ), ( ["A", "C"], - pd.DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]), - pd.DataFrame( + DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]), + DataFrame( [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"] ), ), @@ -247,7 +245,7 @@ def test_setitem_list_of_tuples(self, float_frame): ) def test_setitem_list_missing_columns(self, columns, box, expected): # GH 29334 - df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"]) 
df[columns] = box tm.assert_frame_equal(df, expected) @@ -259,7 +257,7 @@ def test_setitem_multi_index(self): cols = MultiIndex.from_product(it) index = pd.date_range("20141006", periods=20) vals = np.random.randint(1, 1000, (len(index), len(cols))) - df = pd.DataFrame(vals, columns=cols, index=index) + df = DataFrame(vals, columns=cols, index=index) i, j = df.index.values.copy(), it[-1][:] @@ -277,10 +275,10 @@ def test_setitem_multi_index(self): def test_setitem_callable(self): # GH 12533 - df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}) + df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}) df[lambda x: "A"] = [11, 12, 13, 14] - exp = pd.DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]}) + exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]}) tm.assert_frame_equal(df, exp) def test_setitem_other_callable(self): @@ -288,10 +286,10 @@ def test_setitem_other_callable(self): def inc(x): return x + 1 - df = pd.DataFrame([[-1, 1], [1, -1]]) + df = DataFrame([[-1, 1], [1, -1]]) df[df > 0] = inc - expected = pd.DataFrame([[-1, inc], [inc, -1]]) + expected = DataFrame([[-1, inc], [inc, -1]]) tm.assert_frame_equal(df, expected) def test_getitem_boolean( @@ -440,7 +438,7 @@ def test_getitem_ix_mixed_integer(self): tm.assert_frame_equal(result, expected) # 11320 - df = pd.DataFrame( + df = DataFrame( { "rna": (1.5, 2.2, 3.2, 4.5), -1000: [11, 21, 36, 40], @@ -782,7 +780,7 @@ def test_setitem_None(self, float_frame): def test_setitem_empty(self): # GH 9596 - df = pd.DataFrame( + df = DataFrame( {"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]} ) @@ -804,9 +802,9 @@ def test_setitem_empty_frame_with_boolean(self, dtype, kwargs): def test_setitem_with_empty_listlike(self): # GH #17101 index = pd.Index([], name="idx") - result = pd.DataFrame(columns=["A"], index=index) + result = DataFrame(columns=["A"], index=index) result["A"] = [] - expected = pd.DataFrame(columns=["A"], index=index) + expected = DataFrame(columns=["A"], index=index) tm.assert_index_equal(result.index, expected.index) def test_setitem_scalars_no_index(self): @@ -819,7 +817,7 @@ def test_setitem_scalars_no_index(self): def test_getitem_empty_frame_with_boolean(self): # Test for issue #11859 - df = pd.DataFrame() + df = DataFrame() df2 = df[df > 0] tm.assert_frame_equal(df, df2) @@ -887,11 +885,11 @@ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame): def test_setitem_slice_position(self): # GH#31469 - df = pd.DataFrame(np.zeros((100, 1))) + df = DataFrame(np.zeros((100, 1))) df[-4:] = 1 arr = np.zeros((100, 1)) arr[-4:] = 1 - expected = pd.DataFrame(arr) + expected = DataFrame(arr) tm.assert_frame_equal(df, expected) def test_getitem_setitem_non_ix_labels(self): @@ -1190,7 +1188,7 @@ def test_setitem_mixed_datetime(self): ], } ) - df = pd.DataFrame(0, columns=list("ab"), index=range(6)) + df = DataFrame(0, columns=list("ab"), index=range(6)) df["b"] = pd.NaT df.loc[0, "b"] = datetime(2012, 1, 1) df.loc[1, "b"] = 1 @@ -1392,7 +1390,7 @@ def test_lookup_raises(self, float_frame): def test_lookup_requires_unique_axes(self): # GH#33041 raise with a helpful error message - df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "A"]) + df = DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "A"]) rows = [0, 1] cols = ["A", "A"] @@ -1481,7 +1479,7 @@ def test_reindex_with_multi_index(self): # 1: 2.0 # 2: 5.0 # 3: 5.8 - df = pd.DataFrame( + df = DataFrame( { "a": [-1] * 7 + [0] * 7 + [1] * 7, "b": list(range(7)) * 3, @@ -1493,13 +1491,13 @@ def 
test_reindex_with_multi_index(self): # reindexing w/o a `method` value reindexed = df.reindex(new_multi_index) - expected = pd.DataFrame( + expected = DataFrame( {"a": [0] * 4, "b": new_index, "c": [np.nan, "C", "F", np.nan]} ).set_index(["a", "b"]) tm.assert_frame_equal(expected, reindexed) # reindexing with backfilling - expected = pd.DataFrame( + expected = DataFrame( {"a": [0] * 4, "b": new_index, "c": ["B", "C", "F", "G"]} ).set_index(["a", "b"]) reindexed_with_backfilling = df.reindex(new_multi_index, method="bfill") @@ -1509,7 +1507,7 @@ def test_reindex_with_multi_index(self): tm.assert_frame_equal(expected, reindexed_with_backfilling) # reindexing with padding - expected = pd.DataFrame( + expected = DataFrame( {"a": [0] * 4, "b": new_index, "c": ["A", "C", "F", "F"]} ).set_index(["a", "b"]) reindexed_with_padding = df.reindex(new_multi_index, method="pad") @@ -1560,7 +1558,7 @@ def test_single_element_ix_dont_upcast(self, float_frame): assert is_integer(result) # GH 11617 - df = pd.DataFrame(dict(a=[1.23])) + df = DataFrame(dict(a=[1.23])) df["b"] = 666 result = df.loc[0, "b"] @@ -1660,19 +1658,19 @@ def test_loc_duplicates(self): trange = trange.insert(loc=5, item=pd.Timestamp(year=2017, month=1, day=5)) - df = pd.DataFrame(0, index=trange, columns=["A", "B"]) + df = DataFrame(0, index=trange, columns=["A", "B"]) bool_idx = np.array([False, False, False, False, False, True]) # assignment df.loc[trange[bool_idx], "A"] = 6 - expected = pd.DataFrame( + expected = DataFrame( {"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange ) tm.assert_frame_equal(df, expected) # in-place - df = pd.DataFrame(0, index=trange, columns=["A", "B"]) + df = DataFrame(0, index=trange, columns=["A", "B"]) df.loc[trange[bool_idx], "A"] += 6 tm.assert_frame_equal(df, expected) @@ -1685,10 +1683,10 @@ def test_loc_duplicates(self): ], ) def test_reindex_methods(self, method, expected_values): - df = pd.DataFrame({"x": list(range(5))}) + df = DataFrame({"x": list(range(5))}) target = np.array([-0.1, 0.9, 1.1, 1.5]) - expected = pd.DataFrame({"x": expected_values}, index=target) + expected = DataFrame({"x": expected_values}, index=target) actual = df.reindex(target, method=method) tm.assert_frame_equal(expected, actual) @@ -1713,14 +1711,14 @@ def test_reindex_methods(self, method, expected_values): tm.assert_frame_equal(expected, actual) def test_reindex_methods_nearest_special(self): - df = pd.DataFrame({"x": list(range(5))}) + df = DataFrame({"x": list(range(5))}) target = np.array([-0.1, 0.9, 1.1, 1.5]) - expected = pd.DataFrame({"x": [0, 1, 1, np.nan]}, index=target) + expected = DataFrame({"x": [0, 1, 1, np.nan]}, index=target) actual = df.reindex(target, method="nearest", tolerance=0.2) tm.assert_frame_equal(expected, actual) - expected = pd.DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target) + expected = DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target) actual = df.reindex(target, method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1]) tm.assert_frame_equal(expected, actual) @@ -1728,7 +1726,7 @@ def test_reindex_nearest_tz(self, tz_aware_fixture): # GH26683 tz = tz_aware_fixture idx = pd.date_range("2019-01-01", periods=5, tz=tz) - df = pd.DataFrame({"x": list(range(5))}, index=idx) + df = DataFrame({"x": list(range(5))}, index=idx) expected = df.head(3) actual = df.reindex(idx[:3], method="nearest") @@ -1737,8 +1735,8 @@ def test_reindex_nearest_tz(self, tz_aware_fixture): def test_reindex_nearest_tz_empty_frame(self): # https://github.com/pandas-dev/pandas/issues/31964 dti = 
pd.DatetimeIndex(["2016-06-26 14:27:26+00:00"]) - df = pd.DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"])) - expected = pd.DataFrame(index=dti) + df = DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"])) + expected = DataFrame(index=dti) result = df.reindex(dti, method="nearest") tm.assert_frame_equal(result, expected) @@ -1776,8 +1774,8 @@ def test_set_dataframe_column_ns_dtype(self): def test_non_monotonic_reindex_methods(self): dr = pd.date_range("2013-08-01", periods=6, freq="B") data = np.random.randn(6, 1) - df = pd.DataFrame(data, index=dr, columns=list("A")) - df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A")) + df = DataFrame(data, index=dr, columns=list("A")) + df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A")) # index is not monotonic increasing or decreasing msg = "index must be monotonic increasing or decreasing" with pytest.raises(ValueError, match=msg): @@ -1808,7 +1806,7 @@ def verify(df, level, idx, indexer, check_index_type=True): right = df.iloc[indexer].set_index(icol) tm.assert_frame_equal(left, right, check_index_type=check_index_type) - df = pd.DataFrame( + df = DataFrame( { "jim": list("B" * 4 + "A" * 2 + "C" * 3), "joe": list("abcdeabcd")[::-1], @@ -1886,7 +1884,7 @@ def verify(df, level, idx, indexer, check_index_type=True): verify(df, "joe", ["3rd", "1st"], i) def test_getitem_ix_float_duplicates(self): - df = pd.DataFrame( + df = DataFrame( np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc") ) expect = df.iloc[1:] @@ -1902,7 +1900,7 @@ def test_getitem_ix_float_duplicates(self): expect = df.iloc[1:, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) - df = pd.DataFrame( + df = DataFrame( np.random.randn(4, 3), index=[1, 0.2, 0.2, 1], columns=list("abc") ) expect = df.iloc[1:-1] @@ -1923,17 +1921,17 @@ def test_setitem_with_unaligned_tz_aware_datetime_column(self): # Assignment of unaligned offset-aware datetime series. 
# Make sure timezone isn't lost column = Series(pd.date_range("2015-01-01", periods=3, tz="utc"), name="dates") - df = pd.DataFrame({"dates": column}) + df = DataFrame({"dates": column}) df["dates"] = column[[1, 0, 2]] tm.assert_series_equal(df["dates"], column) - df = pd.DataFrame({"dates": column}) + df = DataFrame({"dates": column}) df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]] tm.assert_series_equal(df["dates"], column) def test_setitem_datetime_coercion(self): # gh-1048 - df = pd.DataFrame({"c": [pd.Timestamp("2010-10-01")] * 3}) + df = DataFrame({"c": [pd.Timestamp("2010-10-01")] * 3}) df.loc[0:1, "c"] = np.datetime64("2008-08-08") assert pd.Timestamp("2008-08-08") == df.loc[0, "c"] assert pd.Timestamp("2008-08-08") == df.loc[1, "c"] @@ -2140,7 +2138,7 @@ def test_type_error_multiindex(self): def test_interval_index(self): # GH 19977 index = pd.interval_range(start=0, periods=3) - df = pd.DataFrame( + df = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=index, columns=["A", "B", "C"] ) @@ -2149,7 +2147,7 @@ def test_interval_index(self): tm.assert_almost_equal(result, expected) index = pd.interval_range(start=0, periods=3, closed="both") - df = pd.DataFrame( + df = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=index, columns=["A", "B", "C"] ) @@ -2160,7 +2158,7 @@ def test_interval_index(self): def test_getitem_interval_index_partial_indexing(self): # GH#36490 - df = pd.DataFrame( + df = DataFrame( np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5)) ) @@ -2218,7 +2216,7 @@ def test_set_reset(self): def test_object_casting_indexing_wraps_datetimelike(): # GH#31649, check the indexing methods all the way down the stack - df = pd.DataFrame( + df = DataFrame( { "A": [1, 2], "B": pd.date_range("2000", periods=2), @@ -2257,7 +2255,7 @@ def test_object_casting_indexing_wraps_datetimelike(): def test_lookup_deprecated(): # GH18262 - df = pd.DataFrame( + df = DataFrame( {"col": ["A", "A", "B", "B"], "A": [80, 23, np.nan, 22], "B": [80, 55, 76, 67]} ) with tm.assert_produces_warning(FutureWarning): diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index d114a3178b686..95209c0c35195 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -399,7 +399,7 @@ def test_where_none(self): def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self): # see gh-21947 - df = pd.DataFrame(columns=["a"]) + df = DataFrame(columns=["a"]) cond = df assert (cond.dtypes == object).all() diff --git a/pandas/tests/frame/methods/test_align.py b/pandas/tests/frame/methods/test_align.py index 8fdaa27144aed..36a57fadff623 100644 --- a/pandas/tests/frame/methods/test_align.py +++ b/pandas/tests/frame/methods/test_align.py @@ -197,8 +197,8 @@ def test_align_multiindex(self): [range(2), range(3), range(2)], names=("a", "b", "c") ) idx = pd.Index(range(2), name="b") - df1 = pd.DataFrame(np.arange(12, dtype="int64"), index=midx) - df2 = pd.DataFrame(np.arange(2, dtype="int64"), index=idx) + df1 = DataFrame(np.arange(12, dtype="int64"), index=midx) + df2 = DataFrame(np.arange(2, dtype="int64"), index=idx) # these must be the same results (but flipped) res1l, res1r = df1.align(df2, join="left") @@ -207,7 +207,7 @@ def test_align_multiindex(self): expl = df1 tm.assert_frame_equal(expl, res1l) tm.assert_frame_equal(expl, res2r) - expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx) + expr = DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx) tm.assert_frame_equal(expr, res1r) 
tm.assert_frame_equal(expr, res2l) @@ -217,20 +217,20 @@ def test_align_multiindex(self): exp_idx = pd.MultiIndex.from_product( [range(2), range(2), range(2)], names=("a", "b", "c") ) - expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx) + expl = DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx) tm.assert_frame_equal(expl, res1l) tm.assert_frame_equal(expl, res2r) - expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx) + expr = DataFrame([0, 0, 1, 1] * 2, index=exp_idx) tm.assert_frame_equal(expr, res1r) tm.assert_frame_equal(expr, res2l) def test_align_series_combinations(self): - df = pd.DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")) + df = DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")) s = Series([1, 2, 4], index=list("ABD"), name="x") # frame + series res1, res2 = df.align(s, axis=0) - exp1 = pd.DataFrame( + exp1 = DataFrame( {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, index=list("ABCDE"), ) diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index e4c469dd888b4..133e8c03fab3d 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -177,7 +177,7 @@ def test_append_dtypes(self): def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): # GH 30238 tz = tz_naive_fixture - df = pd.DataFrame([pd.Timestamp(timestamp, tz=tz)]) + df = DataFrame([pd.Timestamp(timestamp, tz=tz)]) result = df.append(df.iloc[0]).iloc[-1] expected = Series(pd.Timestamp(timestamp, tz=tz), name=0) tm.assert_series_equal(result, expected) @@ -193,7 +193,7 @@ def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): ], ) def test_other_dtypes(self, data, dtype): - df = pd.DataFrame(data, dtype=dtype) + df = DataFrame(data, dtype=dtype) result = df.append(df.iloc[0]).iloc[-1] expected = Series(data, name=0, dtype=dtype) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py index ca62b56664518..2da6c6e3f0a51 100644 --- a/pandas/tests/frame/methods/test_clip.py +++ b/pandas/tests/frame/methods/test_clip.py @@ -1,7 +1,6 @@ import numpy as np import pytest -import pandas as pd from pandas import DataFrame, Series import pandas._testing as tm @@ -100,7 +99,7 @@ def test_clip_against_list_like(self, simple_frame, inplace, lower, axis, res): result = original.clip(lower=lower, upper=[5, 6, 7], axis=axis, inplace=inplace) - expected = pd.DataFrame(res, columns=original.columns, index=original.index) + expected = DataFrame(res, columns=original.columns, index=original.index) if inplace: result = original tm.assert_frame_equal(result, expected, check_exact=True) diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py index 78f265d32f8df..d1f38d90547fd 100644 --- a/pandas/tests/frame/methods/test_combine_first.py +++ b/pandas/tests/frame/methods/test_combine_first.py @@ -18,7 +18,7 @@ def test_combine_first_mixed(self): b = Series(range(2), index=range(5, 7)) g = DataFrame({"A": a, "B": b}) - exp = pd.DataFrame( + exp = DataFrame( {"A": list("abab"), "B": [0.0, 1.0, 0.0, 1.0]}, index=[0, 1, 5, 6] ) combined = f.combine_first(g) @@ -169,13 +169,13 @@ def test_combine_first_mixed_bug(self): def test_combine_first_align_nan(self): # GH 7509 (not fixed) - dfa = pd.DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"]) - dfb = pd.DataFrame([[4], [5]], columns=["b"]) + dfa = 
DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"]) + dfb = DataFrame([[4], [5]], columns=["b"]) assert dfa["a"].dtype == "datetime64[ns]" assert dfa["b"].dtype == "int64" res = dfa.combine_first(dfb) - exp = pd.DataFrame( + exp = DataFrame( {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2.0, 5.0]}, columns=["a", "b"], ) @@ -185,7 +185,7 @@ def test_combine_first_align_nan(self): assert res["b"].dtype == "float64" res = dfa.iloc[:0].combine_first(dfb) - exp = pd.DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"]) + exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"]) tm.assert_frame_equal(res, exp) # ToDo: this must be datetime64 assert res["a"].dtype == "float64" @@ -195,21 +195,21 @@ def test_combine_first_align_nan(self): def test_combine_first_timezone(self): # see gh-7630 data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC") - df1 = pd.DataFrame( + df1 = DataFrame( columns=["UTCdatetime", "abc"], data=data1, index=pd.date_range("20140627", periods=1), dtype="object", ) data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC") - df2 = pd.DataFrame( + df2 = DataFrame( columns=["UTCdatetime", "xyz"], data=data2, index=pd.date_range("20140628", periods=1), dtype="object", ) res = df2[["UTCdatetime"]].combine_first(df1) - exp = pd.DataFrame( + exp = DataFrame( { "UTCdatetime": [ pd.Timestamp("2010-01-01 01:01", tz="UTC"), @@ -230,9 +230,9 @@ def test_combine_first_timezone(self): # see gh-10567 dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC") - df1 = pd.DataFrame({"DATE": dts1}) + df1 = DataFrame({"DATE": dts1}) dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC") - df2 = pd.DataFrame({"DATE": dts2}) + df2 = DataFrame({"DATE": dts2}) res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) @@ -241,11 +241,11 @@ def test_combine_first_timezone(self): dts1 = pd.DatetimeIndex( ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern" ) - df1 = pd.DataFrame({"DATE": dts1}, index=[1, 3, 5, 7]) + df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7]) dts2 = pd.DatetimeIndex( ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern" ) - df2 = pd.DataFrame({"DATE": dts2}, index=[2, 4, 5]) + df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.DatetimeIndex( @@ -259,14 +259,14 @@ def test_combine_first_timezone(self): ], tz="US/Eastern", ) - exp = pd.DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) # different tz dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern") - df1 = pd.DataFrame({"DATE": dts1}) + df1 = DataFrame({"DATE": dts1}) dts2 = pd.date_range("2015-01-03", "2015-01-05") - df2 = pd.DataFrame({"DATE": dts2}) + df2 = DataFrame({"DATE": dts2}) # if df1 doesn't have NaN, keep its dtype res = df1.combine_first(df2) @@ -274,9 +274,9 @@ def test_combine_first_timezone(self): assert res["DATE"].dtype == "datetime64[ns, US/Eastern]" dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern") - df1 = pd.DataFrame({"DATE": dts1}) + df1 = DataFrame({"DATE": dts1}) dts2 = pd.date_range("2015-01-01", "2015-01-03") - df2 = pd.DataFrame({"DATE": dts2}) + df2 = DataFrame({"DATE": dts2}) res = df1.combine_first(df2) exp_dts = [ @@ -284,41 +284,41 @@ def test_combine_first_timezone(self): pd.Timestamp("2015-01-02", tz="US/Eastern"), pd.Timestamp("2015-01-03"), ] - exp = pd.DataFrame({"DATE": exp_dts}) + exp = DataFrame({"DATE": exp_dts}) 
tm.assert_frame_equal(res, exp) assert res["DATE"].dtype == "object" def test_combine_first_timedelta(self): data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"]) - df1 = pd.DataFrame({"TD": data1}, index=[1, 3, 5, 7]) + df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7]) data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"]) - df2 = pd.DataFrame({"TD": data2}, index=[2, 4, 5]) + df2 = DataFrame({"TD": data2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.TimedeltaIndex( ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"] ) - exp = pd.DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res["TD"].dtype == "timedelta64[ns]" def test_combine_first_period(self): data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M") - df1 = pd.DataFrame({"P": data1}, index=[1, 3, 5, 7]) + df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7]) data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M") - df2 = pd.DataFrame({"P": data2}, index=[2, 4, 5]) + df2 = DataFrame({"P": data2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.PeriodIndex( ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M" ) - exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res["P"].dtype == data1.dtype # different freq dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D") - df2 = pd.DataFrame({"P": dts2}, index=[2, 4, 5]) + df2 = DataFrame({"P": dts2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = [ @@ -329,15 +329,15 @@ def test_combine_first_period(self): pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M"), ] - exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res["P"].dtype == "object" def test_combine_first_int(self): # GH14687 - integer series that do not align exactly - df1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="int64") - df2 = pd.DataFrame({"a": [1, 4]}, dtype="int64") + df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64") + df2 = DataFrame({"a": [1, 4]}, dtype="int64") res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) @@ -346,10 +346,10 @@ def test_combine_first_int(self): @pytest.mark.parametrize("val", [1, 1.0]) def test_combine_first_with_asymmetric_other(self, val): # see gh-20699 - df1 = pd.DataFrame({"isNum": [val]}) - df2 = pd.DataFrame({"isBool": [True]}) + df1 = DataFrame({"isNum": [val]}) + df2 = DataFrame({"isBool": [True]}) res = df1.combine_first(df2) - exp = pd.DataFrame({"isBool": [True], "isNum": [val]}) + exp = DataFrame({"isBool": [True], "isNum": [val]}) tm.assert_frame_equal(res, exp) diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 87c9dc32650c0..7eeeb245534f5 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -74,10 +74,10 @@ def test_cov_ddof(self, test_ddof): ) def test_cov_nullable_integer(self, other_column): # https://github.com/pandas-dev/pandas/issues/33803 - data = pd.DataFrame({"a": pd.array([1, 2, None]), "b": other_column}) + data = DataFrame({"a": pd.array([1, 2, None]), "b": other_column}) result = data.cov() arr = np.array([[0.5, 0.5], [0.5, 1.0]]) - expected = pd.DataFrame(arr, columns=["a", "b"], index=["a", "b"]) +
expected = DataFrame(arr, columns=["a", "b"], index=["a", "b"]) tm.assert_frame_equal(result, expected) @@ -155,7 +155,7 @@ def test_corr_int_and_boolean(self): def test_corr_cov_independent_index_column(self): # GH#14617 - df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd")) + df = DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd")) for method in ["cov", "corr"]: result = getattr(df, method)() assert result.index is not result.columns @@ -163,7 +163,7 @@ def test_corr_cov_independent_index_column(self): def test_corr_invalid_method(self): # GH#22298 - df = pd.DataFrame(np.random.normal(size=(10, 2))) + df = DataFrame(np.random.normal(size=(10, 2))) msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, " with pytest.raises(ValueError, match=msg): df.corr(method="____") @@ -186,15 +186,15 @@ def test_corr_int(self): @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"]) def test_corr_nullable_integer(self, nullable_column, other_column, method): # https://github.com/pandas-dev/pandas/issues/33803 - data = pd.DataFrame({"a": nullable_column, "b": other_column}) + data = DataFrame({"a": nullable_column, "b": other_column}) result = data.corr(method=method) - expected = pd.DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"]) + expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"]) tm.assert_frame_equal(result, expected) def test_corr_item_cache(self): # Check that corr does not lead to incorrect entries in item_cache - df = pd.DataFrame({"A": range(10)}) + df = DataFrame({"A": range(10)}) df["B"] = range(10)[::-1] ser = df["A"] # populate item_cache @@ -275,7 +275,7 @@ def test_corrwith_matches_corrcoef(self): def test_corrwith_mixed_dtypes(self): # GH#18570 - df = pd.DataFrame( + df = DataFrame( {"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]} ) s = Series([0, 6, 7, 3]) @@ -285,16 +285,16 @@ def test_corrwith_mixed_dtypes(self): tm.assert_series_equal(result, expected) def test_corrwith_index_intersection(self): - df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"]) - df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"]) + df1 = DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"]) + df2 = DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"]) result = df1.corrwith(df2, drop=True).index.sort_values() expected = df1.columns.intersection(df2.columns).sort_values() tm.assert_index_equal(result, expected) def test_corrwith_index_union(self): - df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"]) - df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"]) + df1 = DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"]) + df2 = DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"]) result = df1.corrwith(df2, drop=False).index.sort_values() expected = df1.columns.union(df2.columns).sort_values() @@ -302,7 +302,7 @@ def test_corrwith_index_union(self): def test_corrwith_dup_cols(self): # GH#21925 - df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T) + df1 = DataFrame(np.vstack([np.arange(10)] * 3).T) df2 = df1.copy() df2 = pd.concat((df2, df2[0]), axis=1) @@ -313,7 +313,7 @@ def test_corrwith_dup_cols(self): @td.skip_if_no_scipy def test_corrwith_spearman(self): # GH#21925 - df = pd.DataFrame(np.random.random(size=(100, 3))) + df = DataFrame(np.random.random(size=(100, 3))) result = df.corrwith(df ** 2, method="spearman") expected = 
Series(np.ones(len(result))) tm.assert_series_equal(result, expected) @@ -321,7 +321,7 @@ def test_corrwith_spearman(self): @td.skip_if_no_scipy def test_corrwith_kendall(self): # GH#21925 - df = pd.DataFrame(np.random.random(size=(100, 3))) + df = DataFrame(np.random.random(size=(100, 3))) result = df.corrwith(df ** 2, method="kendall") expected = Series(np.ones(len(result))) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index d10d4c8ea05ab..0358bc3c04539 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -34,9 +34,9 @@ def test_describe_bool_in_mixed_frame(self): def test_describe_empty_object(self): # GH#27183 - df = pd.DataFrame({"A": [None, None]}, dtype=object) + df = DataFrame({"A": [None, None]}, dtype=object) result = df.describe() - expected = pd.DataFrame( + expected = DataFrame( {"A": [0, 0, np.nan, np.nan]}, dtype=object, index=["count", "unique", "top", "freq"], @@ -48,7 +48,7 @@ def test_describe_empty_object(self): def test_describe_bool_frame(self): # GH#13891 - df = pd.DataFrame( + df = DataFrame( { "bool_data_1": [False, False, True, True], "bool_data_2": [False, True, True, True], @@ -61,7 +61,7 @@ def test_describe_bool_frame(self): ) tm.assert_frame_equal(result, expected) - df = pd.DataFrame( + df = DataFrame( { "bool_data": [False, False, True, True, False], "int_data": [0, 1, 2, 3, 4], @@ -74,7 +74,7 @@ def test_describe_bool_frame(self): ) tm.assert_frame_equal(result, expected) - df = pd.DataFrame( + df = DataFrame( {"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]} ) result = df.describe() @@ -119,7 +119,7 @@ def test_describe_empty_categorical_column(self): # GH#26397 # Ensure the index of an empty categorical DataFrame column # also contains (count, unique, top, freq) - df = pd.DataFrame({"empty_col": Categorical([])}) + df = DataFrame({"empty_col": Categorical([])}) result = df.describe() expected = DataFrame( {"empty_col": [0, 0, np.nan, np.nan]}, @@ -198,7 +198,7 @@ def test_describe_timedelta_values(self): # GH#6145 t1 = pd.timedelta_range("1 days", freq="D", periods=5) t2 = pd.timedelta_range("1 hours", freq="H", periods=5) - df = pd.DataFrame({"t1": t1, "t2": t2}) + df = DataFrame({"t1": t1, "t2": t2}) expected = DataFrame( { @@ -249,7 +249,7 @@ def test_describe_tz_values(self, tz_naive_fixture): start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) - df = pd.DataFrame({"s1": s1, "s2": s2}) + df = DataFrame({"s1": s1, "s2": s2}) expected = DataFrame( { @@ -271,9 +271,9 @@ def test_describe_tz_values(self, tz_naive_fixture): tm.assert_frame_equal(result, expected) def test_datetime_is_numeric_includes_datetime(self): - df = pd.DataFrame({"a": pd.date_range("2012", periods=3), "b": [1, 2, 3]}) + df = DataFrame({"a": pd.date_range("2012", periods=3), "b": [1, 2, 3]}) result = df.describe(datetime_is_numeric=True) - expected = pd.DataFrame( + expected = DataFrame( { "a": [ 3, @@ -297,7 +297,7 @@ def test_describe_tz_values2(self): start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) - df = pd.DataFrame({"s1": s1, "s2": s2}) + df = DataFrame({"s1": s1, "s2": s2}) s1_ = s1.describe() s2_ = Series( @@ -334,7 +334,7 @@ def test_describe_tz_values2(self): def test_describe_percentiles_integer_idx(self): # GH#26660 - df = pd.DataFrame({"x": [1]}) + df = DataFrame({"x": [1]}) pct =
np.linspace(0, 1, 10 + 1) result = df.describe(percentiles=pct) diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py index 9ef6ba5f410a9..8affcce478cf4 100644 --- a/pandas/tests/frame/methods/test_diff.py +++ b/pandas/tests/frame/methods/test_diff.py @@ -8,7 +8,7 @@ class TestDataFrameDiff: def test_diff_requires_integer(self): - df = pd.DataFrame(np.random.randn(2, 2)) + df = DataFrame(np.random.randn(2, 2)) with pytest.raises(ValueError, match="periods must be an integer"): df.diff(1.5) @@ -33,10 +33,10 @@ def test_diff(self, datetime_frame): tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1)) # GH#10907 - df = pd.DataFrame({"y": Series([2]), "z": Series([3])}) + df = DataFrame({"y": Series([2]), "z": Series([3])}) df.insert(0, "x", 1) result = df.diff(axis=1) - expected = pd.DataFrame({"x": np.nan, "y": Series(1), "z": Series(1)}) + expected = DataFrame({"x": np.nan, "y": Series(1), "z": Series(1)}) tm.assert_frame_equal(result, expected) def test_diff_timedelta64_with_nat(self): @@ -44,12 +44,10 @@ def test_diff_timedelta64_with_nat(self): arr = np.arange(6).reshape(3, 2).astype("timedelta64[ns]") arr[:, 0] = np.timedelta64("NaT", "ns") - df = pd.DataFrame(arr) + df = DataFrame(arr) result = df.diff(1, axis=0) - expected = pd.DataFrame( - {0: df[0], 1: [pd.NaT, pd.Timedelta(2), pd.Timedelta(2)]} - ) + expected = DataFrame({0: df[0], 1: [pd.NaT, pd.Timedelta(2), pd.Timedelta(2)]}) tm.assert_equal(result, expected) result = df.diff(0) @@ -176,7 +174,7 @@ def test_diff_axis(self): def test_diff_period(self): # GH#32995 Don't pass an incorrect axis pi = pd.date_range("2016-01-01", periods=3).to_period("D") - df = pd.DataFrame({"A": pi}) + df = DataFrame({"A": pi}) result = df.diff(1, axis=1) @@ -185,24 +183,24 @@ def test_diff_period(self): def test_diff_axis1_mixed_dtypes(self): # GH#32995 operate column-wise when we have mixed dtypes and axis=1 - df = pd.DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) + df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) - expected = pd.DataFrame({"A": [np.nan, np.nan, np.nan], "B": df["B"] / 2}) + expected = DataFrame({"A": [np.nan, np.nan, np.nan], "B": df["B"] / 2}) result = df.diff(axis=1) tm.assert_frame_equal(result, expected) # GH#21437 mixed-float-dtypes - df = pd.DataFrame( + df = DataFrame( {"a": np.arange(3, dtype="float32"), "b": np.arange(3, dtype="float64")} ) result = df.diff(axis=1) - expected = pd.DataFrame({"a": df["a"] * np.nan, "b": df["b"] * 0}) + expected = DataFrame({"a": df["a"] * np.nan, "b": df["b"] * 0}) tm.assert_frame_equal(result, expected) def test_diff_axis1_mixed_dtypes_large_periods(self): # GH#32995 operate column-wise when we have mixed dtypes and axis=1 - df = pd.DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) + df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) expected = df * np.nan @@ -211,19 +209,19 @@ def test_diff_axis1_mixed_dtypes_large_periods(self): def test_diff_axis1_mixed_dtypes_negative_periods(self): # GH#32995 operate column-wise when we have mixed dtypes and axis=1 - df = pd.DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) + df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) - expected = pd.DataFrame({"A": -1.0 * df["A"], "B": df["B"] * np.nan}) + expected = DataFrame({"A": -1.0 * df["A"], "B": df["B"] * np.nan}) result = df.diff(axis=1, periods=-1) tm.assert_frame_equal(result, expected) def test_diff_sparse(self): # 
GH#28813 .diff() should work for sparse dataframes as well - sparse_df = pd.DataFrame([[0, 1], [1, 0]], dtype="Sparse[int]") + sparse_df = DataFrame([[0, 1], [1, 0]], dtype="Sparse[int]") result = sparse_df.diff() - expected = pd.DataFrame( + expected = DataFrame( [[np.nan, np.nan], [1.0, -1.0]], dtype=pd.SparseDtype("float", 0.0) ) @@ -234,7 +232,7 @@ def test_diff_sparse(self): [ ( 0, - pd.DataFrame( + DataFrame( { "a": [np.nan, 0, 1, 0, np.nan, np.nan, np.nan, 0], "b": [np.nan, 1, np.nan, np.nan, -2, 1, np.nan, np.nan], @@ -246,7 +244,7 @@ def test_diff_sparse(self): ), ( 1, - pd.DataFrame( + DataFrame( { "a": np.repeat(np.nan, 8), "b": [0, 1, np.nan, 1, np.nan, np.nan, np.nan, 0], @@ -260,7 +258,7 @@ def test_diff_sparse(self): ) def test_diff_integer_na(self, axis, expected): # GH#24171 IntegerNA Support for DataFrame.diff() - df = pd.DataFrame( + df = DataFrame( { "a": np.repeat([0, 1, np.nan, 2], 2), "b": np.tile([0, 1, np.nan, 2], 2), @@ -278,7 +276,7 @@ def test_diff_readonly(self): # https://github.com/pandas-dev/pandas/issues/35559 arr = np.random.randn(5, 2) arr.flags.writeable = False - df = pd.DataFrame(arr) + df = DataFrame(arr) result = df.diff() - expected = pd.DataFrame(np.array(df)).diff() + expected = DataFrame(np.array(df)).diff() tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index da369658078a0..c45d774b3bb9e 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -21,7 +21,7 @@ def test_drop_raise_exception_if_labels_not_in_level(msg, labels, level): # GH 8594 mi = pd.MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) s = pd.Series([10, 20, 30], index=mi) - df = pd.DataFrame([10, 20, 30], index=mi) + df = DataFrame([10, 20, 30], index=mi) with pytest.raises(KeyError, match=msg): s.drop(labels, level=level) @@ -34,7 +34,7 @@ def test_drop_errors_ignore(labels, level): # GH 8594 mi = pd.MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) s = pd.Series([10, 20, 30], index=mi) - df = pd.DataFrame([10, 20, 30], index=mi) + df = DataFrame([10, 20, 30], index=mi) expected_s = s.drop(labels, level=level, errors="ignore") tm.assert_series_equal(s, expected_s) @@ -47,7 +47,7 @@ def test_drop_with_non_unique_datetime_index_and_invalid_keys(): # GH 30399 # define dataframe with unique datetime index - df = pd.DataFrame( + df = DataFrame( np.random.randn(5, 3), columns=["a", "b", "c"], index=pd.date_range("2012", freq="H", periods=5), @@ -148,7 +148,7 @@ def test_drop(self): # inplace cache issue # GH#5628 - df = pd.DataFrame(np.random.randn(10, 3), columns=list("abc")) + df = DataFrame(np.random.randn(10, 3), columns=list("abc")) expected = df[~(df.b > 0)] return_value = df.drop(labels=df[df.b > 0].index, inplace=True) assert return_value is None @@ -252,15 +252,15 @@ def test_raise_on_drop_duplicate_index(self, actual): def test_drop_empty_list(self, index, drop_labels): # GH#21494 expected_index = [i for i in index if i not in drop_labels] - frame = pd.DataFrame(index=index).drop(drop_labels) - tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index)) + frame = DataFrame(index=index).drop(drop_labels) + tm.assert_frame_equal(frame, DataFrame(index=expected_index)) @pytest.mark.parametrize("index", [[1, 2, 3], [1, 2, 2]]) @pytest.mark.parametrize("drop_labels", [[1, 4], [4, 5]]) def test_drop_non_empty_list(self, index, drop_labels): # GH# 21494 with pytest.raises(KeyError, match="not found in axis"): - 
pd.DataFrame(index=index).drop(drop_labels) + DataFrame(index=index).drop(drop_labels) def test_mixed_depth_drop(self): arrays = [ @@ -427,7 +427,7 @@ def test_drop_preserve_names(self): @pytest.mark.parametrize("inplace", [False, True]) def test_inplace_drop_and_operation(self, operation, inplace): # GH#30484 - df = pd.DataFrame({"x": range(5)}) + df = DataFrame({"x": range(5)}) expected = df.copy() df["y"] = range(5) y = df["y"] diff --git a/pandas/tests/frame/methods/test_filter.py b/pandas/tests/frame/methods/test_filter.py index 569b2fe21d1c2..af77db4058b43 100644 --- a/pandas/tests/frame/methods/test_filter.py +++ b/pandas/tests/frame/methods/test_filter.py @@ -133,7 +133,7 @@ def test_filter_corner(self): def test_filter_regex_non_string(self): # GH#5798 trying to filter on non-string columns should drop, # not raise - df = pd.DataFrame(np.random.random((3, 2)), columns=["STRING", 123]) + df = DataFrame(np.random.random((3, 2)), columns=["STRING", 123]) result = df.filter(regex="STRING") expected = df[["STRING"]] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py index fb3fbacaf2627..5e50e63016f26 100644 --- a/pandas/tests/frame/methods/test_isin.py +++ b/pandas/tests/frame/methods/test_isin.py @@ -87,7 +87,7 @@ def test_isin_df(self): def test_isin_tuples(self): # GH#16394 - df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]}) + df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]}) df["C"] = list(zip(df["A"], df["B"])) result = df["C"].isin([(1, "a")]) tm.assert_series_equal(result, Series([True, False, False], name="C")) @@ -124,7 +124,7 @@ def test_isin_dupe_self(self): tm.assert_frame_equal(result, expected) def test_isin_against_series(self): - df = pd.DataFrame( + df = DataFrame( {"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"] ) s = Series([1, 3, 11, 4], index=["a", "b", "c", "d"]) @@ -193,13 +193,13 @@ def test_isin_empty_datetimelike(self): @pytest.mark.parametrize( "values", [ - pd.DataFrame({"a": [1, 2, 3]}, dtype="category"), + DataFrame({"a": [1, 2, 3]}, dtype="category"), Series([1, 2, 3], dtype="category"), ], ) def test_isin_category_frame(self, values): # GH#34256 - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) expected = DataFrame({"a": [True, True, True], "b": [False, False, False]}) result = df.isin(values) diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 80e57b9d71a85..5cdd65b8cf6e2 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -11,7 +11,7 @@ class TestDataFrameQuantile: "df,expected", [ [ - pd.DataFrame( + DataFrame( { 0: Series(pd.arrays.SparseArray([1, 2])), 1: Series(pd.arrays.SparseArray([3, 4])), @@ -20,7 +20,7 @@ class TestDataFrameQuantile: Series([1.5, 3.5], name=0.5), ], [ - pd.DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), + DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), Series([1.0], name=0.5), ], ], @@ -79,7 +79,7 @@ def test_quantile_date_range(self): dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") ser = Series(dti) - df = pd.DataFrame(ser) + df = DataFrame(ser) result = df.quantile(numeric_only=False) expected = Series( @@ -319,7 +319,7 @@ def test_quantile_box(self): tm.assert_series_equal(res, exp) res = df.quantile([0.5], numeric_only=False) - exp = pd.DataFrame( + exp = DataFrame( [ [ 
pd.Timestamp("2011-01-02"), @@ -391,7 +391,7 @@ def test_quantile_box(self): tm.assert_series_equal(res, exp) res = df.quantile([0.5], numeric_only=False) - exp = pd.DataFrame( + exp = DataFrame( [ [ pd.Timestamp("2011-01-02"), @@ -506,7 +506,7 @@ def test_quantile_empty_no_rows(self): def test_quantile_empty_no_columns(self): # GH#23925 _get_numeric_data may drop all columns - df = pd.DataFrame(pd.date_range("1/1/18", periods=5)) + df = DataFrame(pd.date_range("1/1/18", periods=5)) df.columns.name = "captain tightpants" result = df.quantile(0.5) expected = Series([], index=[], name=0.5, dtype=np.float64) @@ -514,6 +514,6 @@ def test_quantile_empty_no_columns(self): tm.assert_series_equal(result, expected) result = df.quantile([0.5]) - expected = pd.DataFrame([], index=[0.5], columns=[]) + expected = DataFrame([], index=[0.5], columns=[]) expected.columns.name = "captain tightpants" tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 99a3bbdf5ffe3..99494191c043a 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -76,7 +76,7 @@ def test_reindex(self, float_frame): assert result is not float_frame def test_reindex_nan(self): - df = pd.DataFrame( + df = DataFrame( [[1, 2], [3, 5], [7, 11], [9, 23]], index=[2, np.nan, 1, 5], columns=["joe", "jim"], @@ -89,7 +89,7 @@ def test_reindex_nan(self): tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False) # GH10388 - df = pd.DataFrame( + df = DataFrame( { "other": ["a", "b", np.nan, "c"], "date": ["2015-03-22", np.nan, "2012-01-08", np.nan], @@ -263,8 +263,8 @@ def test_reindex_dups(self): def test_reindex_axis_style(self): # https://github.com/pandas-dev/pandas/issues/12392 - df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) - expected = pd.DataFrame( + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = DataFrame( {"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3] ) result = df.reindex([0, 1, 3]) @@ -278,8 +278,8 @@ def test_reindex_axis_style(self): def test_reindex_positional_warns(self): # https://github.com/pandas-dev/pandas/issues/12392 - df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) - expected = pd.DataFrame({"A": [1.0, 2], "B": [4.0, 5], "C": [np.nan, np.nan]}) + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = DataFrame({"A": [1.0, 2], "B": [4.0, 5], "C": [np.nan, np.nan]}) with tm.assert_produces_warning(FutureWarning): result = df.reindex([0, 1], ["A", "B", "C"]) @@ -287,7 +287,7 @@ def test_reindex_positional_warns(self): def test_reindex_axis_style_raises(self): # https://github.com/pandas-dev/pandas/issues/12392 - df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex([0, 1], ["A"], axis=1) @@ -322,9 +322,9 @@ def test_reindex_axis_style_raises(self): def test_reindex_single_named_indexer(self): # https://github.com/pandas-dev/pandas/issues/12392 - df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}) + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}) result = df.reindex([0, 1], columns=["A"]) - expected = pd.DataFrame({"A": [1, 2]}) + expected = DataFrame({"A": [1, 2]}) tm.assert_frame_equal(result, expected) def test_reindex_api_equivalence(self): @@ -444,9 +444,9 @@ def test_reindex_multi_categorical_time(self): Categorical(date_range("2012-01-01", periods=3, freq="H")), ] ) - df = pd.DataFrame({"a": 
range(len(midx))}, index=midx) + df = DataFrame({"a": range(len(midx))}, index=midx) df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]] result = df2.reindex(midx) - expected = pd.DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) + expected = DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 569677f1fec5e..2c909ab2f8227 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -553,13 +553,13 @@ def test_regex_replace_dict_nested(self, mix_abc): def test_regex_replace_dict_nested_non_first_character(self): # GH 25259 - df = pd.DataFrame({"first": ["abc", "bca", "cab"]}) - expected = pd.DataFrame({"first": [".bc", "bc.", "c.b"]}) + df = DataFrame({"first": ["abc", "bca", "cab"]}) + expected = DataFrame({"first": [".bc", "bc.", "c.b"]}) result = df.replace({"a": "."}, regex=True) tm.assert_frame_equal(result, expected) def test_regex_replace_dict_nested_gh4115(self): - df = pd.DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2}) + df = DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2}) expected = DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2}) result = df.replace({"Type": {"Q": 0, "T": 1}}) tm.assert_frame_equal(result, expected) @@ -669,11 +669,11 @@ def test_replace(self, datetime_frame): # GH 11698 # test for mixed data types. - df = pd.DataFrame( + df = DataFrame( [("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] ) df1 = df.replace("-", np.nan) - expected_df = pd.DataFrame( + expected_df = DataFrame( [(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] ) tm.assert_frame_equal(df1, expected_df) @@ -712,7 +712,7 @@ def test_replace_list(self): def test_replace_with_empty_list(self): # GH 21977 s = Series([["a", "b"], [], np.nan, [1]]) - df = pd.DataFrame({"col": s}) + df = DataFrame({"col": s}) expected = df result = df.replace([], np.nan) tm.assert_frame_equal(result, expected) @@ -1162,7 +1162,7 @@ def test_replace_with_dict_with_bool_keys(self): def test_replace_dict_strings_vs_ints(self): # GH#34789 - df = pd.DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) + df = DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) result = df.replace({"replace_string": "test"}) tm.assert_frame_equal(result, df) @@ -1196,14 +1196,14 @@ def test_nested_dict_overlapping_keys_replace_str(self): tm.assert_frame_equal(result, expected) def test_replace_swapping_bug(self): - df = pd.DataFrame({"a": [True, False, True]}) + df = DataFrame({"a": [True, False, True]}) res = df.replace({"a": {True: "Y", False: "N"}}) - expect = pd.DataFrame({"a": ["Y", "N", "Y"]}) + expect = DataFrame({"a": ["Y", "N", "Y"]}) tm.assert_frame_equal(res, expect) - df = pd.DataFrame({"a": [0, 1, 0]}) + df = DataFrame({"a": [0, 1, 0]}) res = df.replace({"a": {0: "Y", 1: "N"}}) - expect = pd.DataFrame({"a": ["Y", "N", "Y"]}) + expect = DataFrame({"a": ["Y", "N", "Y"]}) tm.assert_frame_equal(res, expect) def test_replace_period(self): @@ -1221,7 +1221,7 @@ def test_replace_period(self): } } - df = pd.DataFrame( + df = DataFrame( [ "out_augmented_AUG_2012.json", "out_augmented_SEP_2013.json", @@ -1255,7 +1255,7 @@ def test_replace_datetime(self): } } - df = pd.DataFrame( + df = DataFrame( [ "out_augmented_AUG_2012.json", "out_augmented_SEP_2013.json", @@ -1453,9 +1453,9 @@ def test_replace_commutative(self, df, to_replace, exp): # DataFrame.replace() overwrites when values are non-numeric # also added 
to data frame whilst issue was for series - df = pd.DataFrame(df) + df = DataFrame(df) - expected = pd.DataFrame(exp) + expected = DataFrame(exp) result = df.replace(to_replace) tm.assert_frame_equal(result, expected) @@ -1471,22 +1471,22 @@ def test_replace_commutative(self, df, to_replace, exp): ) def test_replace_replacer_dtype(self, replacer): # GH26632 - df = pd.DataFrame(["a"]) + df = DataFrame(["a"]) result = df.replace({"a": replacer, "b": replacer}) - expected = pd.DataFrame([replacer]) + expected = DataFrame([replacer]) tm.assert_frame_equal(result, expected) def test_replace_after_convert_dtypes(self): # GH31517 - df = pd.DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64") + df = DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64") result = df.replace(1, 10) - expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64") + expected = DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64") tm.assert_frame_equal(result, expected) def test_replace_invalid_to_replace(self): # GH 18634 # API: replace() should raise an exception if invalid argument is given - df = pd.DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]}) + df = DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]}) msg = ( r"Expecting 'to_replace' to be either a scalar, array-like, " r"dict or None, got invalid type.*" @@ -1498,17 +1498,17 @@ def test_replace_invalid_to_replace(self): @pytest.mark.parametrize("value", [np.nan, pd.NA]) def test_replace_no_replacement_dtypes(self, dtype, value): # https://github.com/pandas-dev/pandas/issues/32988 - df = pd.DataFrame(np.eye(2), dtype=dtype) + df = DataFrame(np.eye(2), dtype=dtype) result = df.replace(to_replace=[None, -np.inf, np.inf], value=value) tm.assert_frame_equal(result, df) @pytest.mark.parametrize("replacement", [np.nan, 5]) def test_replace_with_duplicate_columns(self, replacement): # GH 24798 - result = pd.DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]}) + result = DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]}) result.columns = list("AAB") - expected = pd.DataFrame( + expected = DataFrame( {"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]} ) expected.columns = list("AAB") @@ -1525,9 +1525,9 @@ def test_replace_period_ignore_float(self): Regression test for GH#34871: if df.replace(1.0, 0.0) is called on a df with a Period column the old, faulty behavior is to raise TypeError. 
""" - df = pd.DataFrame({"Per": [pd.Period("2020-01")] * 3}) + df = DataFrame({"Per": [pd.Period("2020-01")] * 3}) result = df.replace(1.0, 0.0) - expected = pd.DataFrame({"Per": [pd.Period("2020-01")] * 3}) + expected = DataFrame({"Per": [pd.Period("2020-01")] * 3}) tm.assert_frame_equal(expected, result) def test_replace_value_category_type(self): @@ -1545,7 +1545,7 @@ def test_replace_value_category_type(self): "col5": ["obj1", "obj2", "obj3", "obj4"], } # explicitly cast columns as category and order them - input_df = pd.DataFrame(data=input_dict).astype( + input_df = DataFrame(data=input_dict).astype( {"col2": "category", "col4": "category"} ) input_df["col2"] = input_df["col2"].cat.reorder_categories( @@ -1564,7 +1564,7 @@ def test_replace_value_category_type(self): "col5": ["obj9", "obj2", "obj3", "obj4"], } # explicitly cast columns as category and order them - expected = pd.DataFrame(data=expected_dict).astype( + expected = DataFrame(data=expected_dict).astype( {"col2": "category", "col4": "category"} ) expected["col2"] = expected["col2"].cat.reorder_categories( @@ -1594,14 +1594,14 @@ def test_replace_dict_category_type(self, input_category_df, expected_category_d # create input dataframe input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]} # explicitly cast columns as category - input_df = pd.DataFrame(data=input_dict).astype( + input_df = DataFrame(data=input_dict).astype( {"col1": "category", "col2": "category", "col3": "category"} ) # create expected dataframe expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]} # explicitly cast columns as category - expected = pd.DataFrame(data=expected_dict).astype( + expected = DataFrame(data=expected_dict).astype( {"col1": "category", "col2": "category", "col3": "category"} ) @@ -1612,23 +1612,23 @@ def test_replace_dict_category_type(self, input_category_df, expected_category_d def test_replace_with_compiled_regex(self): # https://github.com/pandas-dev/pandas/issues/35680 - df = pd.DataFrame(["a", "b", "c"]) + df = DataFrame(["a", "b", "c"]) regex = re.compile("^a$") result = df.replace({regex: "z"}, regex=True) - expected = pd.DataFrame(["z", "b", "c"]) + expected = DataFrame(["z", "b", "c"]) tm.assert_frame_equal(result, expected) def test_replace_intervals(self): # https://github.com/pandas-dev/pandas/issues/35931 - df = pd.DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]}) + df = DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]}) result = df.replace({"a": {pd.Interval(0, 1): "x"}}) - expected = pd.DataFrame({"a": ["x", "x"]}) + expected = DataFrame({"a": ["x", "x"]}) tm.assert_frame_equal(result, expected) def test_replace_unicode(self): # GH: 16784 columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}} - df1 = pd.DataFrame({"positive": np.ones(3)}) + df1 = DataFrame({"positive": np.ones(3)}) result = df1.replace(columns_values_map) - expected = pd.DataFrame({"positive": np.ones(3)}) + expected = DataFrame({"positive": np.ones(3)}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py index db97a3e2a0e4f..5cf5aea8846c5 100644 --- a/pandas/tests/frame/methods/test_round.py +++ b/pandas/tests/frame/methods/test_round.py @@ -168,7 +168,7 @@ def test_round_mixed_type(self): def test_round_with_duplicate_columns(self): # GH#11611 - df = pd.DataFrame( + df = DataFrame( np.random.random([3, 3]), columns=["A", "B", "C"], index=["first", "second", "third"], @@ -195,7 +195,7 @@ def test_round_builtin(self): def 
test_round_nonunique_categorical(self): # See GH#21809 idx = pd.CategoricalIndex(["low"] * 3 + ["hi"] * 3) - df = pd.DataFrame(np.random.rand(6, 3), columns=list("abc")) + df = DataFrame(np.random.rand(6, 3), columns=list("abc")) expected = df.round(3) expected.index = idx diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py index 5daecd6a475aa..2e21ce8ec2256 100644 --- a/pandas/tests/frame/methods/test_shift.py +++ b/pandas/tests/frame/methods/test_shift.py @@ -131,7 +131,7 @@ def test_shift_duplicate_columns(self): shifted = [] for columns in column_lists: - df = pd.DataFrame(data.copy(), columns=columns) + df = DataFrame(data.copy(), columns=columns) for s in range(5): df.iloc[:, s] = df.iloc[:, s].shift(s + 1) df.columns = range(5) @@ -147,8 +147,8 @@ def test_shift_duplicate_columns(self): def test_shift_axis1_multiple_blocks(self): # GH#35488 - df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3))) - df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2))) + df1 = DataFrame(np.random.randint(1000, size=(5, 3))) + df2 = DataFrame(np.random.randint(1000, size=(5, 2))) df3 = pd.concat([df1, df2], axis=1) assert len(df3._mgr.blocks) == 2 @@ -284,13 +284,11 @@ def test_shift_dt64values_int_fill_deprecated(self): tm.assert_frame_equal(result, expected) # axis = 1 - df2 = pd.DataFrame({"A": ser, "B": ser}) + df2 = DataFrame({"A": ser, "B": ser}) df2._consolidate_inplace() with tm.assert_produces_warning(FutureWarning): result = df2.shift(1, axis=1, fill_value=0) - expected = pd.DataFrame( - {"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]} - ) + expected = DataFrame({"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index a106702aff807..55450a693c2e6 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -352,7 +352,7 @@ def test_sort_index_multiindex(self, level): expected_mi = MultiIndex.from_tuples( [[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC") ) - expected = pd.DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi) + expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi) result = df.sort_index(level=level) tm.assert_frame_equal(result, expected) @@ -360,7 +360,7 @@ def test_sort_index_multiindex(self, level): expected_mi = MultiIndex.from_tuples( [[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC") ) - expected = pd.DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi) + expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi) result = df.sort_index(level=level, sort_remaining=False) tm.assert_frame_equal(result, expected) @@ -736,14 +736,14 @@ def test_sort_multi_index_key_str(self): tm.assert_frame_equal(result, expected) def test_changes_length_raises(self): - df = pd.DataFrame({"A": [1, 2, 3]}) + df = DataFrame({"A": [1, 2, 3]}) with pytest.raises(ValueError, match="change the shape"): df.sort_index(key=lambda x: x[:1]) def test_sort_index_multiindex_sparse_column(self): # GH 29735, testing that sort_index on a multiindexed frame with sparse # columns fills with 0. 
-        expected = pd.DataFrame(
+        expected = DataFrame(
             {
                 i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0))
                 for i in range(0, 4)
diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py
index 0ca232ec433e7..d59dc08b94563 100644
--- a/pandas/tests/frame/methods/test_sort_values.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -130,7 +130,7 @@ def test_sort_values_multicolumn_uint64(self):
         # GH#9918
         # uint64 multicolumn sort

-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "a": pd.Series([18446637057563306014, 1162265347240853609]),
                 "b": pd.Series([1, 2]),
@@ -139,7 +139,7 @@ def test_sort_values_multicolumn_uint64(self):
         df["a"] = df["a"].astype(np.uint64)
         result = df.sort_values(["a", "b"])

-        expected = pd.DataFrame(
+        expected = DataFrame(
             {
                 "a": pd.Series([18446637057563306014, 1162265347240853609]),
                 "b": pd.Series([1, 2]),
@@ -355,14 +355,14 @@ def test_sort_nat(self):
             Timestamp(x)
             for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
         ]
-        df = pd.DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
+        df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])

         d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
         d4 = [
             Timestamp(x)
             for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
         ]
-        expected = pd.DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
+        expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
         sorted_df = df.sort_values(by=["a", "b"])
         tm.assert_frame_equal(sorted_df, expected)
@@ -381,7 +381,7 @@ def test_sort_values_na_position_with_categories(self):
         reversed_category_indices = sorted(category_indices, reverse=True)
         reversed_na_indices = sorted(na_indices)

-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 column_name: pd.Categorical(
                     ["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
@@ -461,19 +461,19 @@ def test_sort_values_nat(self):
             Timestamp(x)
             for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
         ]
-        df = pd.DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
+        df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])

         d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
         d4 = [
             Timestamp(x)
             for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
         ]
-        expected = pd.DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
+        expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
         sorted_df = df.sort_values(by=["a", "b"])
         tm.assert_frame_equal(sorted_df, expected)

     def test_sort_values_na_position_with_categories_raises(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "c": pd.Categorical(
                     ["A", np.nan, "B", np.nan, "C"],
@@ -525,7 +525,7 @@ def test_sort_values_ignore_index(

     def test_sort_values_nat_na_position_default(self):
         # GH 13230
-        expected = pd.DataFrame(
+        expected = DataFrame(
             {
                 "A": [1, 2, 3, 4, 4],
                 "date": pd.DatetimeIndex(
@@ -666,7 +666,7 @@ def test_sort_values_key_empty(self, sort_by_key):
             df.sort_index(key=sort_by_key)

     def test_changes_length_raises(self):
-        df = pd.DataFrame({"A": [1, 2, 3]})
+        df = DataFrame({"A": [1, 2, 3]})
         with pytest.raises(ValueError, match="change the shape"):
             df.sort_values("A", key=lambda x: x[:1])
@@ -696,7 +696,7 @@ def test_sort_values_key_dict_axis(self):
     def test_sort_values_key_casts_to_categorical(self, ordered):
         # https://github.com/pandas-dev/pandas/issues/36383
         categories = ["c", "b", "a"]
-        df = pd.DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
+        df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})

         def sorter(key):
             if key.name == "y":
@@ -706,7 +706,7 @@ def sorter(key):
             return key

         result = df.sort_values(by=["x", "y"], key=sorter)
-        expected = pd.DataFrame(
+        expected = DataFrame(
             {"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
         )
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 9cf5afc09e800..fefe1392087dd 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -463,7 +463,7 @@ def test_nunique(self):
     @pytest.mark.parametrize("tz", [None, "UTC"])
     def test_mean_mixed_datetime_numeric(self, tz):
         # https://github.com/pandas-dev/pandas/issues/24752
-        df = pd.DataFrame({"A": [1, 1], "B": [pd.Timestamp("2000", tz=tz)] * 2})
+        df = DataFrame({"A": [1, 1], "B": [pd.Timestamp("2000", tz=tz)] * 2})
         with tm.assert_produces_warning(FutureWarning):
             result = df.mean()
         expected = Series([1.0], index=["A"])
@@ -474,7 +474,7 @@ def test_mean_excludes_datetimes(self, tz):
         # https://github.com/pandas-dev/pandas/issues/24752
         # Our long-term desired behavior is unclear, but the behavior in
         # 0.24.0rc1 was buggy.
-        df = pd.DataFrame({"A": [pd.Timestamp("2000", tz=tz)] * 2})
+        df = DataFrame({"A": [pd.Timestamp("2000", tz=tz)] * 2})
         with tm.assert_produces_warning(FutureWarning):
             result = df.mean()
@@ -498,7 +498,7 @@ def test_mean_mixed_string_decimal(self):
             {"A": 5, "B": None, "C": Decimal("1223.00")},
         ]

-        df = pd.DataFrame(d)
+        df = DataFrame(d)

         result = df.mean()
         expected = Series([2.7, 681.6], index=["A", "C"])
@@ -766,9 +766,7 @@ def test_sum_corner(self):
     @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)])
     def test_sum_prod_nanops(self, method, unit):
         idx = ["a", "b", "c"]
-        df = pd.DataFrame(
-            {"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}
-        )
+        df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]})
         # The default
         result = getattr(df, method)
         expected = Series([unit, unit, unit], index=idx, dtype="float64")
@@ -788,7 +786,7 @@ def test_sum_prod_nanops(self, method, unit):
         tm.assert_series_equal(result, expected)

         # min_count > 1
-        df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
+        df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
         result = getattr(df, method)(min_count=5)
         expected = Series(result, index=["A", "B"])
         tm.assert_series_equal(result, expected)
@@ -800,7 +798,7 @@ def test_sum_prod_nanops(self, method, unit):
     def test_sum_nanops_timedelta(self):
         # prod isn't defined on timedeltas
         idx = ["a", "b", "c"]
-        df = pd.DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})
+        df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})

         df2 = df.apply(pd.to_timedelta)
@@ -832,7 +830,7 @@ def test_sum_bool(self, float_frame):

     def test_sum_mixed_datetime(self):
         # GH#30886
-        df = pd.DataFrame(
+        df = DataFrame(
             {"A": pd.date_range("2000", periods=4), "B": [1, 2, 3, 4]}
         ).reindex([2, 3, 4])
         result = df.sum()
@@ -861,7 +859,7 @@ def test_mean_datetimelike(self):
         # GH#24757 check that datetimelike are excluded by default, handled
         # correctly with numeric_only=True
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "A": np.arange(3),
                 "B": pd.date_range("2016-01-01", periods=3),
@@ -880,7 +878,7 @@ def test_mean_datetimelike(self):
         tm.assert_series_equal(result, expected)

     def test_mean_datetimelike_numeric_only_false(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "A": np.arange(3),
                 "B": pd.date_range("2016-01-01", periods=3),
@@ -902,9 +900,9 @@ def test_mean_extensionarray_numeric_only_true(self):
         # https://github.com/pandas-dev/pandas/issues/33256
         arr = np.random.randint(1000, size=(10, 5))
-        df = pd.DataFrame(arr, dtype="Int64")
+        df = DataFrame(arr, dtype="Int64")
         result = df.mean(numeric_only=True)
-        expected = pd.DataFrame(arr).mean()
+        expected = DataFrame(arr).mean()
         tm.assert_series_equal(result, expected)

     def test_stats_mixed_type(self, float_string_frame):
@@ -1134,7 +1132,7 @@ def test_series_broadcasting(self):

 class TestDataFrameReductions:
     def test_min_max_dt64_with_NaT(self):
         # Both NaT and Timestamp are in DataFrame.
-        df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
+        df = DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})

         res = df.min()
         exp = Series([pd.Timestamp("2012-05-01")], index=["foo"])
@@ -1145,7 +1143,7 @@ def test_min_max_dt64_with_NaT(self):
         tm.assert_series_equal(res, exp)

         # GH12941, only NaTs are in DataFrame.
-        df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]})
+        df = DataFrame({"foo": [pd.NaT, pd.NaT]})

         res = df.min()
         exp = Series([pd.NaT], index=["foo"])
@@ -1160,7 +1158,7 @@ def test_min_max_dt64_api_consistency_with_NaT(self):
         # returned NaT for series. These tests check that the API is consistent in
         # min/max calls on empty Series/DataFrames. See GH:33704 for more
         # information
-        df = pd.DataFrame(dict(x=pd.to_datetime([])))
+        df = DataFrame(dict(x=pd.to_datetime([])))
         expected_dt_series = Series(pd.to_datetime([]))
         # check axis 0
         assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)
@@ -1173,7 +1171,7 @@ def test_min_max_dt64_api_consistency_empty_df(self):
         # check DataFrame/Series api consistency when calling min/max on an empty
         # DataFrame/Series.
-        df = pd.DataFrame(dict(x=[]))
+        df = DataFrame(dict(x=[]))
         expected_float_series = Series([], dtype=float)
         # check axis 0
         assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())
@@ -1198,7 +1196,7 @@ def test_preserve_timezone(self, initial: str, method):

 def test_mixed_frame_with_integer_sum():
     # https://github.com/pandas-dev/pandas/issues/34520
-    df = pd.DataFrame([["a", 1]], columns=list("ab"))
+    df = DataFrame([["a", 1]], columns=list("ab"))
     df = df.astype({"b": "Int64"})
     result = df.sum()
     expected = Series(["a", 1], index=["a", "b"])
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index f5d1808f367e7..d6bc19091dcef 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -102,14 +102,14 @@ def test_column_contains_raises(self, float_frame):

     def test_tab_completion(self):
         # DataFrame whose columns are identifiers shall have them in __dir__.
-        df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
+        df = DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
         for key in list("ABCD"):
             assert key in dir(df)
         assert isinstance(df.__getitem__("A"), pd.Series)

         # DataFrame whose first-level columns are identifiers shall have
         # them in __dir__.
-        df = pd.DataFrame(
+        df = DataFrame(
             [list("abcd"), list("efgh")],
             columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
         )
@@ -342,27 +342,27 @@ def test_values_mixed_dtypes(self, float_frame, float_string_frame):
         tm.assert_almost_equal(arr, expected)

     def test_to_numpy(self):
-        df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
+        df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
         expected = np.array([[1, 3], [2, 4.5]])
         result = df.to_numpy()
         tm.assert_numpy_array_equal(result, expected)

     def test_to_numpy_dtype(self):
-        df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
+        df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
         expected = np.array([[1, 3], [2, 4]], dtype="int64")
         result = df.to_numpy(dtype="int64")
         tm.assert_numpy_array_equal(result, expected)

     def test_to_numpy_copy(self):
         arr = np.random.randn(4, 3)
-        df = pd.DataFrame(arr)
+        df = DataFrame(arr)
         assert df.values.base is arr
         assert df.to_numpy(copy=False).base is arr
         assert df.to_numpy(copy=True).base is not arr

     def test_to_numpy_mixed_dtype_to_str(self):
         # https://github.com/pandas-dev/pandas/issues/35455
-        df = pd.DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
+        df = DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
         result = df.to_numpy(dtype=str)
         expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
         tm.assert_numpy_array_equal(result, expected)
@@ -529,7 +529,7 @@ async def test_tab_complete_warning(self, ip):
         pytest.importorskip("IPython", minversion="6.0.0")
         from IPython.core.completer import provisionalcompleter

-        code = "import pandas as pd; df = pd.DataFrame()"
+        code = "from pandas import DataFrame; df = DataFrame()"
         await ip.run_code(code)

         # TODO: remove it when Ipython updates
@@ -547,7 +547,7 @@ async def test_tab_complete_warning(self, ip):
                 list(ip.Completer.completions("df.", 1))

     def test_attrs(self):
-        df = pd.DataFrame({"A": [2, 3]})
+        df = DataFrame({"A": [2, 3]})
         assert df.attrs == {}

         df.attrs["version"] = 1
@@ -556,7 +556,7 @@ def test_attrs(self):

     @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
     def test_set_flags(self, allows_duplicate_labels):
-        df = pd.DataFrame({"A": [1, 2]})
+        df = DataFrame({"A": [1, 2]})
         result = df.set_flags(allows_duplicate_labels=allows_duplicate_labels)
         if allows_duplicate_labels is None:
             # We don't update when it's not provided
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 8db3feacfc7af..788ac56829a2b 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -23,7 +23,7 @@ class TestFrameComparisons:
     def test_frame_in_list(self):
         # GH#12689 this should raise at the DataFrame level, not blocks
-        df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
+        df = DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
         msg = "The truth value of a DataFrame is ambiguous"
         with pytest.raises(ValueError, match=msg):
             df in [None]
@@ -35,7 +35,7 @@ def check(df, df2):
             # we expect the result to match Series comparisons for
             # == and !=, inequalities should raise
             result = x == y
-            expected = pd.DataFrame(
+            expected = DataFrame(
                 {col: x[col] == y[col] for col in x.columns},
                 index=x.index,
                 columns=x.columns,
@@ -43,7 +43,7 @@ def check(df, df2):
             tm.assert_frame_equal(result, expected)

             result = x != y
-            expected = pd.DataFrame(
+            expected = DataFrame(
                 {col: x[col] != y[col] for col in x.columns},
                 index=x.index,
                 columns=x.columns,
@@ -71,15 +71,15 @@ def check(df, df2):

         # GH4968
         # invalid date/int comparisons
-        df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
+        df = DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
         df["dates"] = pd.date_range("20010101", periods=len(df))

         df2 = df.copy()
         df2["dates"] = df["a"]
         check(df, df2)

-        df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
-        df2 = pd.DataFrame(
+        df = DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
+        df2 = DataFrame(
             {
                 "a": pd.date_range("20010101", periods=len(df)),
                 "b": pd.date_range("20100101", periods=len(df)),
@@ -90,7 +90,7 @@ def check(df, df2):
     def test_timestamp_compare(self):
         # make sure we can compare Timestamps on the right AND left hand side
         # GH#4982
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "dates1": pd.date_range("20010101", periods=10),
                 "dates2": pd.date_range("20010102", periods=10),
@@ -129,8 +129,8 @@ def test_mixed_comparison(self):
         # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
         # not raise TypeError
         # (this appears to be fixed before GH#22163, not sure when)
-        df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
-        other = pd.DataFrame([["a", "b"], ["c", "d"]])
+        df = DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
+        other = DataFrame([["a", "b"], ["c", "d"]])

         result = df == other
         assert not result.any().any()
@@ -142,9 +142,9 @@ def test_df_boolean_comparison_error(self):
         # GH#4576, GH#22880
         # comparing DataFrame against list/tuple with len(obj) matching
         # len(df.columns) is supported as of GH#22800
-        df = pd.DataFrame(np.arange(6).reshape((3, 2)))
+        df = DataFrame(np.arange(6).reshape((3, 2)))

-        expected = pd.DataFrame([[False, False], [True, False], [False, False]])
+        expected = DataFrame([[False, False], [True, False], [False, False]])

         result = df == (2, 2)
         tm.assert_frame_equal(result, expected)
@@ -153,15 +153,13 @@ def test_df_boolean_comparison_error(self):
         tm.assert_frame_equal(result, expected)

     def test_df_float_none_comparison(self):
-        df = pd.DataFrame(
-            np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
-        )
+        df = DataFrame(np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"])

         result = df.__eq__(None)
         assert not result.any().any()

     def test_df_string_comparison(self):
-        df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
+        df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
         mask_a = df.a > 1
         tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
         tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
@@ -176,8 +174,8 @@ class TestFrameFlexComparisons:
     def test_bool_flex_frame(self):
         data = np.random.randn(5, 3)
         other_data = np.random.randn(5, 3)
-        df = pd.DataFrame(data)
-        other = pd.DataFrame(other_data)
+        df = DataFrame(data)
+        other = DataFrame(other_data)
         ndim_5 = np.ones(df.shape + (1, 3))

         # Unaligned
@@ -265,8 +263,8 @@ def test_bool_flex_frame_complex_dtype(self):
         # complex
         arr = np.array([np.nan, 1, 6, np.nan])
         arr2 = np.array([2j, np.nan, 7, None])
-        df = pd.DataFrame({"a": arr})
-        df2 = pd.DataFrame({"a": arr2})
+        df = DataFrame({"a": arr})
+        df2 = DataFrame({"a": arr2})

         msg = "|".join(
             [
@@ -288,7 +286,7 @@ def test_bool_flex_frame_complex_dtype(self):
         assert rs.values.all()

         arr3 = np.array([2j, np.nan, None])
-        df3 = pd.DataFrame({"a": arr3})
+        df3 = DataFrame({"a": arr3})

         with pytest.raises(TypeError, match=msg):
             # inequalities are not well-defined for complex numbers
@@ -302,16 +300,16 @@ def test_bool_flex_frame_complex_dtype(self):

     def test_bool_flex_frame_object_dtype(self):
         # corner, dtype=object
-        df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
-        df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
+        df1 = DataFrame({"col": ["foo", np.nan, "bar"]})
+        df2 = DataFrame({"col": ["foo", datetime.now(), "bar"]})

         result = df1.ne(df2)
-        exp = pd.DataFrame({"col": [False, True, False]})
+        exp = DataFrame({"col": [False, True, False]})
         tm.assert_frame_equal(result, exp)

     def test_flex_comparison_nat(self):
         # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
         # and _definitely_ not be NaN
-        df = pd.DataFrame([pd.NaT])
+        df = DataFrame([pd.NaT])

         result = df == pd.NaT
         # result.iloc[0, 0] is a np.bool_ object
@@ -329,7 +327,7 @@ def test_flex_comparison_nat(self):
     @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
     def test_df_flex_cmp_constant_return_types(self, opname):
         # GH 15077, non-empty DataFrame
-        df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
+        df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
         const = 2

         result = getattr(df, opname)(const).dtypes.value_counts()
@@ -338,7 +336,7 @@ def test_df_flex_cmp_constant_return_types(self, opname):
     @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
     def test_df_flex_cmp_constant_return_types_empty(self, opname):
         # GH 15077 empty DataFrame
-        df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
+        df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
         const = 2

         empty = df.iloc[:0]
@@ -347,12 +345,12 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname):

     def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
         ii = pd.IntervalIndex.from_breaks([1, 2, 3])
-        df = pd.DataFrame({"A": ii, "B": ii})
+        df = DataFrame({"A": ii, "B": ii})

         ser = Series([0, 0])

         res = df.eq(ser, axis=0)

-        expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
+        expected = DataFrame({"A": [False, False], "B": [False, False]})
         tm.assert_frame_equal(res, expected)

         ser2 = Series([1, 2], index=["A", "B"])
@@ -369,11 +367,11 @@ def test_floordiv_axis0(self):
         # make sure we df.floordiv(ser, axis=0) matches column-wise result
         arr = np.arange(3)
         ser = Series(arr)
-        df = pd.DataFrame({"A": ser, "B": ser})
+        df = DataFrame({"A": ser, "B": ser})

         result = df.floordiv(ser, axis=0)

-        expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
+        expected = DataFrame({col: df[col] // ser for col in df.columns})

         tm.assert_frame_equal(result, expected)
@@ -387,13 +385,13 @@ def test_floordiv_axis0_numexpr_path(self, opname):
         op = getattr(operator, opname)

         arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
-        df = pd.DataFrame(arr)
+        df = DataFrame(arr)
         df["C"] = 1.0

         ser = df[0]
         result = getattr(df, opname)(ser, axis=0)

-        expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
+        expected = DataFrame({col: op(df[col], ser) for col in df.columns})
         tm.assert_frame_equal(result, expected)

         result2 = getattr(df, opname)(ser.values, axis=0)
@@ -404,22 +402,22 @@ def test_df_add_td64_columnwise(self):
         dti = pd.date_range("2016-01-01", periods=10)
         tdi = pd.timedelta_range("1", periods=10)
         tser = Series(tdi)
-        df = pd.DataFrame({0: dti, 1: tdi})
+        df = DataFrame({0: dti, 1: tdi})

         result = df.add(tser, axis=0)
-        expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
+        expected = DataFrame({0: dti + tdi, 1: tdi + tdi})
         tm.assert_frame_equal(result, expected)

     def test_df_add_flex_filled_mixed_dtypes(self):
         # GH 19611
         dti = pd.date_range("2016-01-01", periods=3)
         ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
-        df = pd.DataFrame({"A": dti, "B": ser})
-        other = pd.DataFrame({"A": ser, "B": ser})
+        df = DataFrame({"A": dti, "B": ser})
+        other = DataFrame({"A": ser, "B": ser})
         fill = pd.Timedelta(days=1).to_timedelta64()
         result = df.add(other, fill_value=fill)

-        expected = pd.DataFrame(
+        expected = DataFrame(
             {
                 "A": Series(
                     ["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
@@ -531,13 +529,13 @@ def test_arith_flex_series(self, simple_frame):
         tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)

         # broadcasting issue in GH 7325
-        df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
-        expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
+        df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
+        expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
         result = df.div(df[0], axis="index")
         tm.assert_frame_equal(result, expected)

-        df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
-        expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
+        df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
+        expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
         result = df.div(df[0], axis="index")
         tm.assert_frame_equal(result, expected)
@@ -545,8 +543,8 @@ def test_arith_flex_zero_len_raises(self):
         # GH 19522 passing fill_value to frame flex arith methods should
         # raise even in the zero-length special cases
         ser_len0 = Series([], dtype=object)
-        df_len0 = pd.DataFrame(columns=["A", "B"])
-        df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+        df_len0 = DataFrame(columns=["A", "B"])
+        df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])

         with pytest.raises(NotImplementedError, match="fill_value"):
             df.add(ser_len0, fill_value="E")
@@ -557,7 +555,7 @@ def test_arith_flex_zero_len_raises(self):
     def test_flex_add_scalar_fill_value(self):
         # GH#12723
         dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
-        df = pd.DataFrame({"foo": dat}, index=range(6))
+        df = DataFrame({"foo": dat}, index=range(6))

         exp = df.fillna(0).add(2)
         res = df.add(2, fill_value=0)
@@ -569,21 +567,21 @@ def test_td64_op_nat_casting(self):
         # Make sure we don't accidentally treat timedelta64(NaT) as datetime64
         # when calling dispatch_to_series in DataFrame arithmetic
         ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]")
-        df = pd.DataFrame([[1, 2], [3, 4]])
+        df = DataFrame([[1, 2], [3, 4]])

         result = df * ser
-        expected = pd.DataFrame({0: ser, 1: ser})
+        expected = DataFrame({0: ser, 1: ser})
         tm.assert_frame_equal(result, expected)

     def test_df_add_2d_array_rowlike_broadcasts(self):
         # GH#23000
         arr = np.arange(6).reshape(3, 2)
-        df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
+        df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])

         rowlike = arr[[1], :]  # shape --> (1, ncols)
         assert rowlike.shape == (1, df.shape[1])

-        expected = pd.DataFrame(
+        expected = DataFrame(
             [[2, 4], [4, 6], [6, 8]],
             columns=df.columns,
             index=df.index,
@@ -599,12 +597,12 @@ def test_df_add_2d_array_rowlike_broadcasts(self):
     def test_df_add_2d_array_collike_broadcasts(self):
         # GH#23000
         arr = np.arange(6).reshape(3, 2)
-        df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
+        df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])

         collike = arr[:, [1]]  # shape --> (nrows, 1)
         assert collike.shape == (df.shape[0], 1)

-        expected = pd.DataFrame(
+        expected = DataFrame(
             [[1, 2], [5, 6], [9, 10]],
             columns=df.columns,
             index=df.index,
@@ -622,7 +620,7 @@ def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
         opname = all_arithmetic_operators

         arr = np.arange(6).reshape(3, 2)
-        df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
+        df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])

         rowlike = arr[[1], :]  # shape --> (1, ncols)
         assert rowlike.shape == (1, df.shape[1])
@@ -633,7 +631,7 @@ def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
             getattr(df.loc["C"], opname)(rowlike.squeeze()),
         ]

-        expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
+        expected = DataFrame(exvals, columns=df.columns, index=df.index)

         result = getattr(df, opname)(rowlike)
         tm.assert_frame_equal(result, expected)
@@ -643,7 +641,7 @@ def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
         opname = all_arithmetic_operators

         arr = np.arange(6).reshape(3, 2)
-        df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
+        df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])

         collike = arr[:, [1]]  # shape --> (nrows, 1)
         assert collike.shape == (df.shape[0], 1)
@@ -659,7 +657,7 @@ def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
         # DataFrame op will return all-float.  So we upcast `expected`
         dtype = np.common_type(*[x.values for x in exvals.values()])

-        expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
+        expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)

         result = getattr(df, opname)(collike)
         tm.assert_frame_equal(result, expected)
@@ -667,7 +665,7 @@ def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
     def test_df_bool_mul_int(self):
         # GH 22047, GH 22163 multiplication by 1 should result in int dtype,
         # not object dtype
-        df = pd.DataFrame([[False, True], [False, False]])
+        df = DataFrame([[False, True], [False, False]])
         result = df * 1

         # On appveyor this comes back as np.int32 instead of np.int64,
@@ -681,14 +679,14 @@ def test_df_bool_mul_int(self):

     def test_arith_mixed(self):
-        left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
+        left = DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})

         result = left + left
-        expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
+        expected = DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
         tm.assert_frame_equal(result, expected)

     def test_arith_getitem_commute(self):
-        df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
+        df = DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})

         def _test_op(df, op):
             result = op(df, 1)
@@ -723,35 +721,35 @@ def _test_op(df, op):
     )
     def test_arith_alignment_non_pandas_object(self, values):
         # GH#17901
-        df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
-        expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
+        df = DataFrame({"A": [1, 1], "B": [1, 1]})
+        expected = DataFrame({"A": [2, 2], "B": [3, 3]})
         result = df + values
         tm.assert_frame_equal(result, expected)

     def test_arith_non_pandas_object(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             np.arange(1, 10, dtype="f8").reshape(3, 3),
             columns=["one", "two", "three"],
             index=["a", "b", "c"],
         )

         val1 = df.xs("a").values
-        added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
+        added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
         tm.assert_frame_equal(df + val1, added)

-        added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
+        added = DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
         tm.assert_frame_equal(df.add(val1, axis=0), added)

         val2 = list(df["two"])
-        added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
+        added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
         tm.assert_frame_equal(df + val2, added)

-        added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
+        added = DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
         tm.assert_frame_equal(df.add(val2, axis="index"), added)

         val3 = np.random.rand(*df.shape)
-        added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
+        added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
         tm.assert_frame_equal(df.add(val3), added)

     def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
@@ -759,15 +757,15 @@ def test_operations_with_interval_categories_index(self, all_arithmetic_operator
         op = all_arithmetic_operators
         ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
         data = [1, 2]
-        df = pd.DataFrame([data], columns=ind)
+        df = DataFrame([data], columns=ind)
         num = 10
         result = getattr(df, op)(num)
-        expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
+        expected = DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
         tm.assert_frame_equal(result, expected)

     def test_frame_with_frame_reindex(self):
         # GH#31623
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
                 "bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
@@ -778,7 +776,7 @@ def test_frame_with_frame_reindex(self):

         result = df - df2

-        expected = pd.DataFrame(
+        expected = DataFrame(
             {"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
             columns=["bar", "foo"],
         )
@@ -788,31 +786,31 @@ def test_frame_with_zero_len_series_corner_cases():
     # GH#28600
     # easy all-float case
-    df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
+    df = DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
     ser = Series(dtype=np.float64)

     result = df + ser
-    expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
+    expected = DataFrame(df.values * np.nan, columns=df.columns)
     tm.assert_frame_equal(result, expected)

     with tm.assert_produces_warning(FutureWarning):
         # Automatic alignment for comparisons deprecated
         result = df == ser
-    expected = pd.DataFrame(False, index=df.index, columns=df.columns)
+    expected = DataFrame(False, index=df.index, columns=df.columns)
     tm.assert_frame_equal(result, expected)

     # non-float case should not raise on comparison
-    df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
+    df2 = DataFrame(df.values.view("M8[ns]"), columns=df.columns)
     with tm.assert_produces_warning(FutureWarning):
         # Automatic alignment for comparisons deprecated
         result = df2 == ser
-    expected = pd.DataFrame(False, index=df.index, columns=df.columns)
+    expected = DataFrame(False, index=df.index, columns=df.columns)
     tm.assert_frame_equal(result, expected)


 def test_zero_len_frame_with_series_corner_cases():
     # GH#28600
-    df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
+    df = DataFrame(columns=["A", "B"], dtype=np.float64)
     ser = Series([1, 2], index=["A", "B"])

     result = df + ser
@@ -825,7 +823,7 @@ def test_frame_single_columns_object_sum_axis_1():
     data = {
         "One": Series(["A", 1.2, np.nan]),
     }
-    df = pd.DataFrame(data)
+    df = DataFrame(data)
     result = df.sum(axis=1)
     expected = Series(["A", 1.2, 0])
     tm.assert_series_equal(result, expected)
@@ -840,7 +838,7 @@ class TestFrameArithmeticUnsorted:
     def test_frame_add_tz_mismatch_converts_to_utc(self):
         rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
-        df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
+        df = DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])

         df_moscow = df.tz_convert("Europe/Moscow")
         result = df + df_moscow
@@ -851,7 +849,7 @@ def test_frame_add_tz_mismatch_converts_to_utc(self):

     def test_align_frame(self):
         rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
-        ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
+        ts = DataFrame(np.random.randn(len(rng), 3), index=rng)

         result = ts + ts[::2]
         expected = ts + ts
@@ -1424,7 +1422,7 @@ def test_inplace_ops_identity2(self, op):
     def test_alignment_non_pandas(self):
         index = ["A", "B", "C"]
         columns = ["X", "Y", "Z"]
-        df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
+        df = DataFrame(np.random.randn(3, 3), index=index, columns=columns)

         align = pd.core.ops.align_method_FRAME
         for val in [
@@ -1481,14 +1479,14 @@ def test_alignment_non_pandas(self):
             align(df, val, "columns")

     def test_no_warning(self, all_arithmetic_operators):
-        df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
+        df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
         b = df["B"]
         with tm.assert_produces_warning(None):
             getattr(df, all_arithmetic_operators)(b)

     def test_dunder_methods_binary(self, all_arithmetic_operators):
         # GH#??? frame.__foo__ should only accept one argument
-        df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
+        df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
         b = df["B"]
         with pytest.raises(TypeError, match="takes 2 positional arguments"):
             getattr(df, all_arithmetic_operators)(b, 0)
@@ -1510,20 +1508,20 @@ def test_align_int_fill_bug(self):

 def test_pow_with_realignment():
     # GH#32685 pow has special semantics for operating with null values
-    left = pd.DataFrame({"A": [0, 1, 2]})
-    right = pd.DataFrame(index=[0, 1, 2])
+    left = DataFrame({"A": [0, 1, 2]})
+    right = DataFrame(index=[0, 1, 2])

     result = left ** right
-    expected = pd.DataFrame({"A": [np.nan, 1.0, np.nan]})
+    expected = DataFrame({"A": [np.nan, 1.0, np.nan]})
     tm.assert_frame_equal(result, expected)


 # TODO: move to tests.arithmetic and parametrize
 def test_pow_nan_with_zero():
-    left = pd.DataFrame({"A": [np.nan, np.nan, np.nan]})
-    right = pd.DataFrame({"A": [0, 0, 0]})
+    left = DataFrame({"A": [np.nan, np.nan, np.nan]})
+    right = DataFrame({"A": [0, 0, 0]})

-    expected = pd.DataFrame({"A": [1.0, 1.0, 1.0]})
+    expected = DataFrame({"A": [1.0, 1.0, 1.0]})

     result = left ** right
     tm.assert_frame_equal(result, expected)
@@ -1534,11 +1532,11 @@ def test_pow_nan_with_zero():

 def test_dataframe_series_extension_dtypes():
     # https://github.com/pandas-dev/pandas/issues/34311
-    df = pd.DataFrame(np.random.randint(0, 100, (10, 3)), columns=["a", "b", "c"])
+    df = DataFrame(np.random.randint(0, 100, (10, 3)), columns=["a", "b", "c"])
     ser = Series([1, 2, 3], index=["a", "b", "c"])

     expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)
-    expected = pd.DataFrame(expected, columns=df.columns, dtype="Int64")
+    expected = DataFrame(expected, columns=df.columns, dtype="Int64")

     df_ea = df.astype("Int64")
     result = df_ea + ser
@@ -1550,7 +1548,7 @@ def test_dataframe_series_extension_dtypes():

 def test_dataframe_blockwise_slicelike():
     # GH#34367
     arr = np.random.randint(0, 1000, (100, 10))
-    df1 = pd.DataFrame(arr)
+    df1 = DataFrame(arr)
     df2 = df1.copy()
     df2.iloc[0, [1, 3, 7]] = np.nan
@@ -1565,20 +1563,20 @@ def test_dataframe_blockwise_slicelike():

     for left, right in [(df1, df2), (df2, df3), (df4, df5)]:
         res = left + right

-        expected = pd.DataFrame({i: left[i] + right[i] for i in
left.columns}) + expected = DataFrame({i: left[i] + right[i] for i in left.columns}) tm.assert_frame_equal(res, expected) @pytest.mark.parametrize( "df, col_dtype", [ - (pd.DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"), - (pd.DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"), + (DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"), + (DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"), ], ) def test_dataframe_operation_with_non_numeric_types(df, col_dtype): # GH #22663 - expected = pd.DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab")) + expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab")) expected = expected.astype({"b": col_dtype}) result = df + Series([-1.0], index=list("a")) tm.assert_frame_equal(result, expected) @@ -1586,17 +1584,17 @@ def test_dataframe_operation_with_non_numeric_types(df, col_dtype): def test_arith_reindex_with_duplicates(): # https://github.com/pandas-dev/pandas/issues/35194 - df1 = pd.DataFrame(data=[[0]], columns=["second"]) - df2 = pd.DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"]) + df1 = DataFrame(data=[[0]], columns=["second"]) + df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"]) result = df1 + df2 - expected = pd.DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"]) + expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]]) def test_arith_list_of_arraylike_raise(to_add): # GH 36702. Raise when trying to add list of array-like to DataFrame - df = pd.DataFrame({"x": [1, 2], "y": [1, 2]}) + df = DataFrame({"x": [1, 2], "y": [1, 2]}) msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 2877905ddced1..5772c0650ebe4 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -302,7 +302,7 @@ def f(dtype): def test_equals_different_blocks(self): # GH 9330 - df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]}) + df0 = DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]}) df1 = df0.reset_index()[["A", "B", "C"]] # this assert verifies that the above operations have # induced a block rearrangement @@ -607,16 +607,16 @@ def test_constructor_no_pandas_array(self): # Ensure that PandasArray isn't allowed inside Series # See https://github.com/pandas-dev/pandas/issues/23995 for more. 
arr = Series([1, 2, 3]).array - result = pd.DataFrame({"A": arr}) - expected = pd.DataFrame({"A": [1, 2, 3]}) + result = DataFrame({"A": arr}) + expected = DataFrame({"A": [1, 2, 3]}) tm.assert_frame_equal(result, expected) assert isinstance(result._mgr.blocks[0], IntBlock) def test_add_column_with_pandas_array(self): # GH 26390 - df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) + df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) df["c"] = pd.arrays.PandasArray(np.array([1, 2, None, 3], dtype=object)) - df2 = pd.DataFrame( + df2 = DataFrame( { "a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"], @@ -630,7 +630,7 @@ def test_add_column_with_pandas_array(self): def test_to_dict_of_blocks_item_cache(): # Calling to_dict_of_blocks should not poison item_cache - df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) + df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) df["c"] = pd.arrays.PandasArray(np.array([1, 2, None, 3], dtype=object)) mgr = df._mgr assert len(mgr.blocks) == 3 # i.e. not consolidated @@ -648,7 +648,7 @@ def test_to_dict_of_blocks_item_cache(): def test_update_inplace_sets_valid_block_values(): # https://github.com/pandas-dev/pandas/issues/33457 - df = pd.DataFrame({"a": Series([1, 2, None], dtype="category")}) + df = DataFrame({"a": Series([1, 2, None], dtype="category")}) # inplace update of a single column df["a"].fillna(1, inplace=True) @@ -664,7 +664,7 @@ def test_nonconsolidated_item_cache_take(): # https://github.com/pandas-dev/pandas/issues/35521 # create non-consolidated dataframe with object dtype columns - df = pd.DataFrame() + df = DataFrame() df["col1"] = Series(["a"], dtype=object) df["col2"] = Series([0], dtype=object) @@ -678,6 +678,6 @@ def test_nonconsolidated_item_cache_take(): # now setting value should update actual dataframe df.at[0, "col1"] = "A" - expected = pd.DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) + expected = DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) tm.assert_frame_equal(df, expected) assert df.at[0, "col1"] == "A" diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 2bc6953217cf8..acc87defb568c 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -154,17 +154,17 @@ def test_constructor_dtype_list_data(self): @pytest.mark.skipif(_np_version_under1p19, reason="NumPy change.") def test_constructor_list_of_2d_raises(self): # https://github.com/pandas-dev/pandas/issues/32289 - a = pd.DataFrame() + a = DataFrame() b = np.empty((0, 0)) with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): - pd.DataFrame([a]) + DataFrame([a]) with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): - pd.DataFrame([b]) + DataFrame([b]) - a = pd.DataFrame({"A": [1, 2]}) + a = DataFrame({"A": [1, 2]}) with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"): - pd.DataFrame([a, a]) + DataFrame([a, a]) def test_constructor_mixed_dtypes(self): def _make_mixed_dtypes_df(typ, ad=None): @@ -1101,10 +1101,10 @@ def test_constructor_list_of_lists(self): def test_constructor_list_like_data_nested_list_column(self): # GH 32173 arrays = [list("abcd"), list("cdef")] - result = pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays) + result = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays) mi = MultiIndex.from_arrays(arrays) - expected = pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=mi) + expected = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=mi) tm.assert_frame_equal(result, expected) @@ 
-1655,10 +1655,10 @@ def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out): series = { c: Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"]) } - result = pd.DataFrame(series) + result = DataFrame(series) exp_ind = pd.Index(["a", "b", "c", "d", "e"], name=name_out) - expected = pd.DataFrame( + expected = DataFrame( { "x": [0, 1, 2, np.nan, np.nan], "y": [np.nan, 0, 1, 2, np.nan], @@ -2342,7 +2342,7 @@ def test_from_records_empty_with_nonempty_fields_gh3682(self): def test_check_dtype_empty_numeric_column(self, dtype): # GH24386: Ensure dtypes are set correctly for an empty DataFrame. # Empty DataFrame is generated via dictionary data with non-overlapping columns. - data = pd.DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) + data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) assert data.b.dtype == dtype @@ -2352,7 +2352,7 @@ def test_check_dtype_empty_numeric_column(self, dtype): def test_check_dtype_empty_string_column(self, dtype): # GH24386: Ensure dtypes are set correctly for an empty DataFrame. # Empty DataFrame is generated via dictionary data with non-overlapping columns. - data = pd.DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) + data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) assert data.b.dtype.name == "object" @@ -2668,7 +2668,7 @@ def test_from_datetime_subclass(self): class DatetimeSubclass(datetime): pass - data = pd.DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]}) + data = DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]}) assert data.datetime.dtype == "datetime64[ns]" def test_with_mismatched_index_length_raises(self): @@ -2823,4 +2823,4 @@ def test_construction_from_set_raises(self): # https://github.com/pandas-dev/pandas/issues/32582 msg = "Set type is unordered" with pytest.raises(TypeError, match=msg): - pd.DataFrame({"a": {1, 2, 3}}) + DataFrame({"a": {1, 2, 3}}) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 96e56c329475c..d44c62e1defc7 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -35,21 +35,21 @@ def test_concat_empty_dataframe_dtypes(self): assert result["c"].dtype == np.float64 def test_empty_frame_dtypes(self): - empty_df = pd.DataFrame() + empty_df = DataFrame() tm.assert_series_equal(empty_df.dtypes, Series(dtype=object)) - nocols_df = pd.DataFrame(index=[1, 2, 3]) + nocols_df = DataFrame(index=[1, 2, 3]) tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object)) - norows_df = pd.DataFrame(columns=list("abc")) + norows_df = DataFrame(columns=list("abc")) tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc"))) - norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32) + norows_int_df = DataFrame(columns=list("abc")).astype(np.int32) tm.assert_series_equal( norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc")) ) - df = pd.DataFrame(dict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3]) + df = DataFrame(dict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3]) ex_dtypes = Series(dict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])) tm.assert_series_equal(df.dtypes, ex_dtypes) @@ -80,7 +80,7 @@ def test_datetime_with_tz_dtypes(self): def test_dtypes_are_correct_after_column_slice(self): # GH6525 - df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_) + df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float_) tm.assert_series_equal( df.dtypes, Series(dict([("a", np.float_), ("b", np.float_), 
("c", np.float_)])), @@ -107,7 +107,7 @@ def test_dtypes_gh8722(self, float_string_frame): def test_singlerow_slice_categoricaldtype_gives_series(self): # GH29521 - df = pd.DataFrame({"x": pd.Categorical("a b c d e".split())}) + df = DataFrame({"x": pd.Categorical("a b c d e".split())}) result = df.iloc[0] raw_cat = pd.Categorical(["a"], categories=["a", "b", "c", "d", "e"]) expected = Series(raw_cat, index=["x"], name=0, dtype="category") @@ -227,7 +227,7 @@ def test_is_homogeneous_type(self, data, expected): assert data._is_homogeneous_type is expected def test_asarray_homogenous(self): - df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])}) + df = DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])}) result = np.asarray(df) # may change from object in the future expected = np.array([[1, 1], [2, 2]], dtype="object") @@ -237,12 +237,12 @@ def test_str_to_small_float_conversion_type(self): # GH 20388 np.random.seed(13) col_data = [str(np.random.random() * 1e-12) for _ in range(5)] - result = pd.DataFrame(col_data, columns=["A"]) - expected = pd.DataFrame(col_data, columns=["A"], dtype=object) + result = DataFrame(col_data, columns=["A"]) + expected = DataFrame(col_data, columns=["A"], dtype=object) tm.assert_frame_equal(result, expected) # change the dtype of the elements from object to float one by one result.loc[result.index, "A"] = [float(x) for x in col_data] - expected = pd.DataFrame(col_data, columns=["A"], dtype=float) + expected = DataFrame(col_data, columns=["A"], dtype=float) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( @@ -251,14 +251,14 @@ def test_str_to_small_float_conversion_type(self): def test_convert_dtypes(self, convert_integer, expected): # Specific types are tested in tests/series/test_dtypes.py # Just check that it works for DataFrame here - df = pd.DataFrame( + df = DataFrame( { "a": Series([1, 2, 3], dtype=np.dtype("int32")), "b": Series(["x", "y", "z"], dtype=np.dtype("O")), } ) result = df.convert_dtypes(True, True, convert_integer, False) - expected = pd.DataFrame( + expected = DataFrame( { "a": Series([1, 2, 3], dtype=expected), "b": Series(["x", "y", "z"], dtype="string"), diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index 07cd307c8cc54..2438c743f3b8a 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -235,7 +235,7 @@ def test_join_str_datetime(self): def test_join_multiindex_leftright(self): # GH 10741 - df1 = pd.DataFrame( + df1 = DataFrame( [ ["a", "x", 0.471780], ["a", "y", 0.774908], @@ -250,11 +250,11 @@ def test_join_multiindex_leftright(self): columns=["first", "second", "value1"], ).set_index(["first", "second"]) - df2 = pd.DataFrame( - [["a", 10], ["b", 20]], columns=["first", "value2"] - ).set_index(["first"]) + df2 = DataFrame([["a", 10], ["b", 20]], columns=["first", "value2"]).set_index( + ["first"] + ) - exp = pd.DataFrame( + exp = DataFrame( [ [0.471780, 10], [0.774908, 10], @@ -277,7 +277,7 @@ def test_join_multiindex_leftright(self): exp_idx = pd.MultiIndex.from_product( [["a", "b"], ["x", "y", "z"]], names=["first", "second"] ) - exp = pd.DataFrame( + exp = DataFrame( [ [0.471780, 10], [0.774908, 10], diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 024403189409c..f3f2bbe1d160e 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -135,7 +135,7 @@ def test_dataframe_sub_numexpr_path(self): def test_query_non_str(self): # GH 
11485 - df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]}) + df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]}) msg = "expr must be a string to be evaluated" with pytest.raises(ValueError, match=msg): @@ -146,7 +146,7 @@ def test_query_non_str(self): def test_query_empty_string(self): # GH 13139 - df = pd.DataFrame({"A": [1, 2, 3]}) + df = DataFrame({"A": [1, 2, 3]}) msg = "expr cannot be an empty string" with pytest.raises(ValueError, match=msg): @@ -162,9 +162,9 @@ def test_eval_resolvers_as_list(self): def test_eval_object_dtype_binop(self): # GH#24883 - df = pd.DataFrame({"a1": ["Y", "N"]}) + df = DataFrame({"a1": ["Y", "N"]}) res = df.eval("c = ((a1 == 'Y') & True)") - expected = pd.DataFrame({"a1": ["Y", "N"], "c": [True, False]}) + expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]}) tm.assert_frame_equal(res, expected) @@ -716,12 +716,12 @@ def test_check_tz_aware_index_query(self, tz_aware_fixture): df_index = pd.date_range( start="2019-01-01", freq="1d", periods=10, tz=tz, name="time" ) - expected = pd.DataFrame(index=df_index) - df = pd.DataFrame(index=df_index) + expected = DataFrame(index=df_index) + df = DataFrame(index=df_index) result = df.query('"2018-01-03 00:00:00+00" < time') tm.assert_frame_equal(result, expected) - expected = pd.DataFrame(df_index) + expected = DataFrame(df_index) result = df.reset_index().query('"2018-01-03 00:00:00+00" < time') tm.assert_frame_equal(result, expected) @@ -1045,7 +1045,7 @@ def test_query_single_element_booleans(self, parser, engine): def test_query_string_scalar_variable(self, parser, engine): skip_if_no_pandas_parser(parser) - df = pd.DataFrame( + df = DataFrame( { "Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"], "Price": [109.70, 109.72, 183.30, 183.35], diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 67c53a56eebe9..83a3b65d4b601 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -142,7 +142,7 @@ def test_stack_mixed_level(self): def test_unstack_not_consolidated(self): # Gh#34708 - df = pd.DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]}) + df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]}) df2 = df[["x"]] df2["y"] = df["y"] assert len(df2._mgr.blocks) == 2 @@ -352,10 +352,10 @@ def test_unstack_tuplename_in_multiindex(self): idx = pd.MultiIndex.from_product( [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")] ) - df = pd.DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx) + df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx) result = df.unstack(("A", "a")) - expected = pd.DataFrame( + expected = DataFrame( [[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]], columns=pd.MultiIndex.from_tuples( [ @@ -413,17 +413,17 @@ def test_unstack_mixed_type_name_in_multiindex( idx = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"] ) - df = pd.DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx) + df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx) result = df.unstack(unstack_idx) - expected = pd.DataFrame( + expected = DataFrame( expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) def test_unstack_preserve_dtypes(self): # Checks fix for #11847 - df = pd.DataFrame( + df = DataFrame( dict( state=["IL", "MI", "NC"], index=["a", "b", "c"], @@ -595,7 +595,7 @@ def test_unstack_level_binding(self): names=["first", "second"], ) - expected = pd.DataFrame( + expected = DataFrame( np.array( [[np.nan, 0], [0, np.nan], 
[np.nan, 0], [0, np.nan]], dtype=np.float64 ), @@ -717,11 +717,11 @@ def test_unstack_non_unique_index_names(self): def test_unstack_unused_levels(self): # GH 17845: unused codes in index make unstack() cast int to float idx = pd.MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1] - df = pd.DataFrame([[1, 0]] * 3, index=idx) + df = DataFrame([[1, 0]] * 3, index=idx) result = df.unstack() exp_col = pd.MultiIndex.from_product([[0, 1], ["A", "B", "C"]]) - expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col) + expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col) tm.assert_frame_equal(result, expected) assert (result.columns.levels[1] == idx.levels[1]).all() @@ -730,9 +730,9 @@ def test_unstack_unused_levels(self): codes = [[0, 0, 1, 1], [0, 2, 0, 2]] idx = pd.MultiIndex(levels, codes) block = np.arange(4).reshape(2, 2) - df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx) + df = DataFrame(np.concatenate([block, block + 4]), index=idx) result = df.unstack() - expected = pd.DataFrame( + expected = DataFrame( np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx ) tm.assert_frame_equal(result, expected) @@ -743,7 +743,7 @@ def test_unstack_unused_levels(self): codes = [[0, -1, 1, 1], [0, 2, -1, 2]] idx = pd.MultiIndex(levels, codes) data = np.arange(8) - df = pd.DataFrame(data.reshape(4, 2), index=idx) + df = DataFrame(data.reshape(4, 2), index=idx) cases = ( (0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]), @@ -754,17 +754,13 @@ def test_unstack_unused_levels(self): exp_data = np.zeros(18) * np.nan exp_data[idces] = data cols = pd.MultiIndex.from_product([[0, 1], col_level]) - expected = pd.DataFrame( - exp_data.reshape(3, 6), index=idx_level, columns=cols - ) + expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("cols", [["A", "C"], slice(None)]) def test_unstack_unused_level(self, cols): # GH 18562 : unused codes on the unstacked level - df = pd.DataFrame( - [[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"] - ) + df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"]) ind = df.set_index(["A", "B", "C"], drop=False) selection = ind.loc[(slice(None), slice(None), "I"), cols] @@ -780,7 +776,7 @@ def test_unstack_unused_level(self, cols): def test_unstack_long_index(self): # PH 32624: Error when using a lot of indices to unstack. # The error occurred only, if a lot of indices are used. 
- df = pd.DataFrame( + df = DataFrame( [[1]], columns=pd.MultiIndex.from_tuples([[0]], names=["c1"]), index=pd.MultiIndex.from_tuples( @@ -789,7 +785,7 @@ def test_unstack_long_index(self): ), ) result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"]) - expected = pd.DataFrame( + expected = DataFrame( [[1]], columns=pd.MultiIndex.from_tuples( [[0, 0, 1, 0, 0, 0, 1]], @@ -801,7 +797,7 @@ def test_unstack_long_index(self): def test_unstack_multi_level_cols(self): # PH 24729: Unstack a df with multi level columns - df = pd.DataFrame( + df = DataFrame( [[0.0, 0.0], [0.0, 0.0]], columns=pd.MultiIndex.from_tuples( [["B", "C"], ["B", "D"]], names=["c1", "c2"] @@ -814,7 +810,7 @@ def test_unstack_multi_level_cols(self): def test_unstack_multi_level_rows_and_cols(self): # PH 28306: Unstack df with multi level cols and rows - df = pd.DataFrame( + df = DataFrame( [[1, 2], [3, 4], [-1, -2], [-3, -4]], columns=pd.MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]), index=pd.MultiIndex.from_tuples( @@ -918,7 +914,7 @@ def verify(df): verify(udf[col]) # GH7403 - df = pd.DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)}) + df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)}) df.iloc[3, 1] = np.NaN left = df.set_index(["A", "B"]).unstack(0) @@ -947,9 +943,7 @@ def verify(df): right = DataFrame(vals, columns=cols, index=idx) tm.assert_frame_equal(left, right) - df = pd.DataFrame( - {"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)} - ) + df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)}) df.iloc[3, 1] = np.NaN left = df.set_index(["A", "B"]).unstack(0) @@ -962,7 +956,7 @@ def verify(df): tm.assert_frame_equal(left, right) # GH7401 - df = pd.DataFrame( + df = DataFrame( { "A": list("aaaaabbbbb"), "B": (date_range("2012-01-01", periods=5).tolist() * 2), @@ -1141,7 +1135,7 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels): def test_stack_preserve_categorical_dtype_values(self): # GH-23077 cat = pd.Categorical(["a", "a", "b", "c"]) - df = pd.DataFrame({"A": cat, "B": cat}) + df = DataFrame({"A": cat, "B": cat}) result = df.stack() index = pd.MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]]) expected = Series( @@ -1159,10 +1153,10 @@ def test_stack_preserve_categorical_dtype_values(self): ) def test_stack_multi_columns_non_unique_index(self, index, columns): # GH-28301 - df = pd.DataFrame(index=index, columns=columns).fillna(1) + df = DataFrame(index=index, columns=columns).fillna(1) stacked = df.stack() new_index = pd.MultiIndex.from_tuples(stacked.index.to_numpy()) - expected = pd.DataFrame( + expected = DataFrame( stacked.to_numpy(), index=new_index, columns=stacked.columns ) tm.assert_frame_equal(stacked, expected) @@ -1175,7 +1169,7 @@ def test_unstack_mixed_extension_types(self, level): index = pd.MultiIndex.from_tuples( [("A", 0), ("A", 1), ("B", 1)], names=["a", "b"] ) - df = pd.DataFrame( + df = DataFrame( { "A": pd.core.arrays.integer_array([0, 1, None]), "B": pd.Categorical(["a", "a", "b"]), @@ -1196,10 +1190,10 @@ def test_unstack_mixed_extension_types(self, level): def test_unstack_swaplevel_sortlevel(self, level): # GH 20994 mi = pd.MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"]) - df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"]) + df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"]) df.columns.name = "foo" - expected = pd.DataFrame( + expected = DataFrame( [[3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples( [("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], 
names=["baz", "foo"] @@ -1220,14 +1214,14 @@ def test_unstack_fill_frame_object(): # By default missing values will be NaN result = data.unstack() - expected = pd.DataFrame( + expected = DataFrame( {"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz") ) tm.assert_frame_equal(result, expected) # Fill with any value replaces missing values as expected result = data.unstack(fill_value="d") - expected = pd.DataFrame( + expected = DataFrame( {"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz") ) tm.assert_frame_equal(result, expected) @@ -1235,7 +1229,7 @@ def test_unstack_fill_frame_object(): def test_unstack_timezone_aware_values(): # GH 18338 - df = pd.DataFrame( + df = DataFrame( { "timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")], "a": ["a"], @@ -1245,7 +1239,7 @@ def test_unstack_timezone_aware_values(): columns=["timestamp", "a", "b", "c"], ) result = df.set_index(["a", "b"]).unstack() - expected = pd.DataFrame( + expected = DataFrame( [[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]], index=pd.Index(["a"], name="a"), columns=pd.MultiIndex( @@ -1262,7 +1256,7 @@ def test_stack_timezone_aware_values(): ts = pd.date_range( freq="D", start="20180101", end="20180103", tz="America/New_York" ) - df = pd.DataFrame({"A": ts}, index=["a", "b", "c"]) + df = DataFrame({"A": ts}, index=["a", "b", "c"]) result = df.stack() expected = Series( ts, @@ -1307,11 +1301,11 @@ def test_unstacking_multi_index_df(): def test_stack_positional_level_duplicate_column_names(): # https://github.com/pandas-dev/pandas/issues/36353 columns = pd.MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"]) - df = pd.DataFrame([[1, 1, 1, 1]], columns=columns) + df = DataFrame([[1, 1, 1, 1]], columns=columns) result = df.stack(0) new_columns = pd.Index(["y", "z"], name="a") new_index = pd.MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"]) - expected = pd.DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) + expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index e4e22953397ca..f3667c4dd9d9d 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -45,14 +45,10 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): data_ns = np.array([1, "nat"], dtype="datetime64[ns]") result = pd.Series(data_ns).to_frame() result["new"] = data_ns - expected = pd.DataFrame( - {0: [1, None], "new": [1, None]}, dtype="datetime64[ns]" - ) + expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]") tm.assert_frame_equal(result, expected) # OutOfBoundsDatetime error shouldn't occur data_s = np.array([1, "nat"], dtype="datetime64[s]") result["new"] = data_s - expected = pd.DataFrame( - {0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]" - ) + expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index b085704e8b06f..38032ff717afc 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -133,7 +133,7 @@ def test_to_csv_from_csv4(self): with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path: # GH 10833 (TimedeltaIndex formatting) dt = pd.Timedelta(seconds=1) - df = pd.DataFrame( + df = DataFrame( {"dt_data": [i * dt for i in 
range(3)]}, index=pd.Index([i * dt for i in range(3)], name="dt_index"), ) @@ -1257,7 +1257,7 @@ def test_to_csv_quoting(self): # xref gh-7791: make sure the quoting parameter is passed through # with multi-indexes - df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}) + df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}) df = df.set_index(["a", "b"]) expected_rows = ['"a","b","c"', '"1","3","5"', '"2","4","6"'] @@ -1270,7 +1270,7 @@ def test_period_index_date_overflow(self): dates = ["1990-01-01", "2000-01-01", "3005-01-01"] index = pd.PeriodIndex(dates, freq="D") - df = pd.DataFrame([4, 5, 6], index=index) + df = DataFrame([4, 5, 6], index=index) result = df.to_csv() expected_rows = [",0", "1990-01-01,4", "2000-01-01,5", "3005-01-01,6"] @@ -1288,7 +1288,7 @@ def test_period_index_date_overflow(self): dates = ["1990-01-01", pd.NaT, "3005-01-01"] index = pd.PeriodIndex(dates, freq="D") - df = pd.DataFrame([4, 5, 6], index=index) + df = DataFrame([4, 5, 6], index=index) result = df.to_csv() expected_rows = [",0", "1990-01-01,4", ",5", "3005-01-01,6"] @@ -1298,7 +1298,7 @@ def test_period_index_date_overflow(self): def test_multi_index_header(self): # see gh-5539 columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]) - df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) df.columns = columns header = ["a", "b", "c", "d"] @@ -1311,7 +1311,7 @@ def test_multi_index_header(self): def test_to_csv_single_level_multi_index(self): # see gh-26303 index = pd.Index([(1,), (2,), (3,)]) - df = pd.DataFrame([[1, 2, 3]], columns=index) + df = DataFrame([[1, 2, 3]], columns=index) df = df.reindex(columns=[(1,), (3,)]) expected = ",1,3\n0,1,3\n" result = df.to_csv(line_terminator="\n") @@ -1319,7 +1319,7 @@ def test_to_csv_single_level_multi_index(self): def test_gz_lineend(self): # GH 25311 - df = pd.DataFrame({"a": [1, 2]}) + df = DataFrame({"a": [1, 2]}) expected_rows = ["a", "1", "2"] expected = tm.convert_rows_list_to_csv_str(expected_rows) with tm.ensure_clean("__test_gz_lineend.csv.gz") as path: diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index ad0d1face53cf..9ecc0e6194912 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -24,7 +24,7 @@ def test_rename_mi(self): @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) def test_set_axis_name(self, func): - df = pd.DataFrame([[1, 2], [3, 4]]) + df = DataFrame([[1, 2], [3, 4]]) result = methodcaller(func, "foo")(df) assert df.index.name is None diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index fe1c476ed2205..bc666ade9f13d 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -407,7 +407,7 @@ def test_sample(self): def test_sample_upsampling_without_replacement(self): # GH27451 - df = pd.DataFrame({"A": list("abc")}) + df = DataFrame({"A": list("abc")}) msg = ( "Replace has to be set to `True` when " "upsampling the population `frac` > 1." 
@@ -418,7 +418,7 @@ def test_sample_upsampling_without_replacement(self): def test_sample_is_copy(self): # GH-27357, GH-30784: ensure the result of sample is an actual copy and # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings - df = pd.DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"]) + df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"]) df2 = df.sample(3) with tm.assert_produces_warning(None): @@ -542,7 +542,7 @@ def test_sample(sel): easy_weight_list = [0] * 10 easy_weight_list[5] = 1 - df = pd.DataFrame( + df = DataFrame( { "col1": range(10, 20), "col2": range(20, 30), @@ -578,7 +578,7 @@ def test_sample(sel): ### # Test axis argument - df = pd.DataFrame({"col1": range(10), "col2": ["a"] * 10}) + df = DataFrame({"col1": range(10), "col2": ["a"] * 10}) second_column_weight = [0, 1] tm.assert_frame_equal( df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]] @@ -615,7 +615,7 @@ def test_sample(sel): easy_weight_list = [0] * 3 easy_weight_list[2] = 1 - df = pd.DataFrame( + df = DataFrame( {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10} ) sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) @@ -663,7 +663,7 @@ def test_sample(sel): ) def test_sample_random_state(self, func_str, arg): # GH32503 - df = pd.DataFrame({"col1": range(10, 20), "col2": range(20, 30)}) + df = DataFrame({"col1": range(10, 20), "col2": range(20, 30)}) result = df.sample(n=3, random_state=eval(func_str)(arg)) expected = df.sample(n=3, random_state=com.random_state(eval(func_str)(arg))) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index c7a52dd45fadc..a1cbf38d8eae6 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -20,7 +20,7 @@ def test_groupby_agg_no_extra_calls(): # GH#31760 - df = pd.DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]}) + df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]}) gb = df.groupby("key")["value"] def dummy_func(x): @@ -115,13 +115,13 @@ def test_groupby_aggregation_multi_level_column(): [True, True, np.nan, False], [True, True, np.nan, False], ] - df = pd.DataFrame( + df = DataFrame( data=lst, columns=pd.MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]), ) result = df.groupby(level=1, axis=1).sum() - expected = pd.DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]}) + expected = DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]}) tm.assert_frame_equal(result, expected) @@ -253,7 +253,7 @@ def test_agg_multiple_functions_maintain_order(df): def test_agg_multiple_functions_same_name(): # GH 30880 - df = pd.DataFrame( + df = DataFrame( np.random.randn(1000, 3), index=pd.date_range("1/1/2012", freq="S", periods=1000), columns=["A", "B", "C"], @@ -266,7 +266,7 @@ def test_agg_multiple_functions_same_name(): expected_values = np.array( [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]] ).T - expected = pd.DataFrame( + expected = DataFrame( expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) @@ -275,7 +275,7 @@ def test_agg_multiple_functions_same_name(): def test_agg_multiple_functions_same_name_with_ohlc_present(): # GH 30880 # ohlc expands dimensions, so different test to the above is required. 
- df = pd.DataFrame( + df = DataFrame( np.random.randn(1000, 3), index=pd.date_range("1/1/2012", freq="S", periods=1000), columns=["A", "B", "C"], @@ -298,7 +298,7 @@ def test_agg_multiple_functions_same_name_with_ohlc_present(): [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]] ).T expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values]) - expected = pd.DataFrame( + expected = DataFrame( expected_values, columns=expected_columns, index=expected_index ) # PerformanceWarning is thrown by `assert col in right` in assert_frame_equal @@ -382,7 +382,7 @@ def test_multi_function_flexible_mix(df): def test_groupby_agg_coercing_bools(): # issue 14873 - dat = pd.DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]}) + dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]}) gp = dat.groupby("a") index = Index([1, 2], name="a") @@ -410,7 +410,7 @@ def test_groupby_agg_coercing_bools(): def test_bool_agg_dtype(op): # GH 7001 # Bool sum aggregations result in int - df = pd.DataFrame({"a": [1, 1], "b": [False, True]}) + df = DataFrame({"a": [1, 1], "b": [False, True]}) s = df.set_index("a")["b"] result = op(df.groupby("a"))["b"].dtype @@ -422,7 +422,7 @@ def test_bool_agg_dtype(op): def test_order_aggregate_multiple_funcs(): # GH 25692 - df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]}) + df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]}) res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"]) result = res.columns.levels[1] @@ -436,7 +436,7 @@ def test_order_aggregate_multiple_funcs(): @pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"]) def test_uint64_type_handling(dtype, how): # GH 26310 - df = pd.DataFrame({"x": 6903052872240755750, "y": [1, 2]}) + df = DataFrame({"x": 6903052872240755750, "y": [1, 2]}) expected = df.groupby("y").agg({"x": how}) df.x = df.x.astype(dtype) result = df.groupby("y").agg({"x": how}) @@ -447,7 +447,7 @@ def test_uint64_type_handling(dtype, how): def test_func_duplicates_raises(): # GH28426 msg = "Function names" - df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) + df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) with pytest.raises(SpecificationError, match=msg): df.groupby("A").agg(["min", "min"]) @@ -471,7 +471,7 @@ def test_agg_index_has_complex_internals(index): def test_agg_split_block(): # https://github.com/pandas-dev/pandas/issues/31522 - df = pd.DataFrame( + df = DataFrame( { "key1": ["a", "a", "b", "b", "a"], "key2": ["one", "two", "one", "two", "one"], @@ -479,7 +479,7 @@ def test_agg_split_block(): } ) result = df.groupby("key1").min() - expected = pd.DataFrame( + expected = DataFrame( {"key2": ["one", "one"], "key3": ["six", "six"]}, index=pd.Index(["a", "b"], name="key1"), ) @@ -488,7 +488,7 @@ def test_agg_split_block(): def test_agg_split_object_part_datetime(): # https://github.com/pandas-dev/pandas/pull/31616 - df = pd.DataFrame( + df = DataFrame( { "A": pd.date_range("2000", periods=4), "B": ["a", "b", "c", "d"], @@ -499,7 +499,7 @@ def test_agg_split_object_part_datetime(): } ).astype(object) result = df.groupby([0, 0, 0, 0]).min() - expected = pd.DataFrame( + expected = DataFrame( { "A": [pd.Timestamp("2000")], "B": ["a"], @@ -517,7 +517,7 @@ def test_series_named_agg(self): df = Series([1, 2, 3, 4]) gr = df.groupby([0, 0, 1, 1]) result = gr.agg(a="sum", b="min") - expected = pd.DataFrame( + expected = DataFrame( {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=[0, 1] ) 
tm.assert_frame_equal(result, expected) @@ -533,20 +533,20 @@ def test_no_args_raises(self): # but we do allow this result = gr.agg([]) - expected = pd.DataFrame() + expected = DataFrame() tm.assert_frame_equal(result, expected) def test_series_named_agg_duplicates_no_raises(self): # GH28426 gr = Series([1, 2, 3]).groupby([0, 0, 1]) grouped = gr.agg(a="sum", b="sum") - expected = pd.DataFrame({"a": [3, 3], "b": [3, 3]}) + expected = DataFrame({"a": [3, 3], "b": [3, 3]}) tm.assert_frame_equal(expected, grouped) def test_mangled(self): gr = Series([1, 2, 3]).groupby([0, 0, 1]) result = gr.agg(a=lambda x: 0, b=lambda x: 1) - expected = pd.DataFrame({"a": [0, 0], "b": [1, 1]}) + expected = DataFrame({"a": [0, 0], "b": [1, 1]}) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( @@ -567,11 +567,11 @@ def test_named_agg_nametuple(self, inp): class TestNamedAggregationDataFrame: def test_agg_relabel(self): - df = pd.DataFrame( + df = DataFrame( {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]} ) result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")) - expected = pd.DataFrame( + expected = DataFrame( {"a_max": [1, 3], "b_max": [6, 8]}, index=pd.Index(["a", "b"], name="group"), columns=["a_max", "b_max"], @@ -588,7 +588,7 @@ def test_agg_relabel(self): b_max=("B", "max"), a_98=("A", p98), ) - expected = pd.DataFrame( + expected = DataFrame( { "b_min": [5, 7], "a_min": [0, 2], @@ -603,12 +603,12 @@ def test_agg_relabel(self): tm.assert_frame_equal(result, expected) def test_agg_relabel_non_identifier(self): - df = pd.DataFrame( + df = DataFrame( {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]} ) result = df.groupby("group").agg(**{"my col": ("A", "max")}) - expected = pd.DataFrame( + expected = DataFrame( {"my col": [1, 3]}, index=pd.Index(["a", "b"], name="group") ) tm.assert_frame_equal(result, expected) @@ -616,10 +616,10 @@ def test_agg_relabel_non_identifier(self): def test_duplicate_no_raises(self): # GH 28426, if use same input function on same column, # no error should raise - df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) + df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min")) - expected = pd.DataFrame( + expected = DataFrame( {"a": [1, 3], "b": [1, 3]}, index=pd.Index([0, 1], name="A") ) tm.assert_frame_equal(grouped, expected) @@ -629,34 +629,32 @@ def test_duplicate_no_raises(self): quant50.__name__ = "quant50" quant70.__name__ = "quant70" - test = pd.DataFrame( - {"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]} - ) + test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]}) grouped = test.groupby("col1").agg( quantile_50=("col2", quant50), quantile_70=("col2", quant70) ) - expected = pd.DataFrame( + expected = DataFrame( {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]}, index=pd.Index(["a", "b"], name="col1"), ) tm.assert_frame_equal(grouped, expected) def test_agg_relabel_with_level(self): - df = pd.DataFrame( + df = DataFrame( {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}, index=pd.MultiIndex.from_product([["A", "B"], ["a", "b"]]), ) result = df.groupby(level=0).agg( aa=("A", "max"), bb=("A", "min"), cc=("B", "mean") ) - expected = pd.DataFrame( + expected = DataFrame( {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"] ) tm.assert_frame_equal(result, expected) def test_agg_relabel_other_raises(self): - df = pd.DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]}) + df = DataFrame({"A": [0, 0, 1], "B": [1, 
2, 3]}) grouped = df.groupby("A") match = "Must provide" with pytest.raises(TypeError, match=match): @@ -669,12 +667,12 @@ def test_agg_relabel_other_raises(self): grouped.agg(a=("B", "max"), b=(1, 2, 3)) def test_missing_raises(self): - df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) + df = DataFrame({"A": [0, 1], "B": [1, 2]}) with pytest.raises(KeyError, match="Column 'C' does not exist"): df.groupby("A").agg(c=("C", "sum")) def test_agg_namedtuple(self): - df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) + df = DataFrame({"A": [0, 1], "B": [1, 2]}) result = df.groupby("A").agg( b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count") ) @@ -682,9 +680,9 @@ def test_agg_namedtuple(self): tm.assert_frame_equal(result, expected) def test_mangled(self): - df = pd.DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]}) + df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]}) result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1)) - expected = pd.DataFrame( + expected = DataFrame( {"b": [0, 0], "c": [1, 1]}, index=pd.Index([0, 1], name="A") ) tm.assert_frame_equal(result, expected) @@ -773,9 +771,9 @@ def test_agg_relabel_multiindex_duplicates(): @pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}]) def test_groupby_aggregate_empty_key(kwargs): # GH: 32580 - df = pd.DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]}) + df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]}) result = df.groupby("a").agg(kwargs) - expected = pd.DataFrame( + expected = DataFrame( [1, 4], index=pd.Index([1, 2], dtype="int64", name="a"), columns=pd.MultiIndex.from_tuples([["c", "min"]]), @@ -785,9 +783,9 @@ def test_groupby_aggregate_empty_key(kwargs): def test_groupby_aggregate_empty_key_empty_return(): # GH: 32580 Check if everything works, when return is empty - df = pd.DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]}) + df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]}) result = df.groupby("a").agg({"b": []}) - expected = pd.DataFrame(columns=pd.MultiIndex(levels=[["b"], []], codes=[[], []])) + expected = DataFrame(columns=pd.MultiIndex(levels=[["b"], []], codes=[[], []])) tm.assert_frame_equal(result, expected) @@ -795,13 +793,13 @@ def test_grouby_agg_loses_results_with_as_index_false_relabel(): # GH 32240: When the aggregate function relabels column names and # as_index=False is specified, the results are dropped. - df = pd.DataFrame( + df = DataFrame( {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]} ) grouped = df.groupby("key", as_index=False) result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) - expected = pd.DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]}) + expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]}) tm.assert_frame_equal(result, expected) @@ -810,7 +808,7 @@ def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex(): # as_index=False is specified, the results are dropped. 
Check if # multiindex is returned in the right order - df = pd.DataFrame( + df = DataFrame( { "key": ["x", "y", "x", "y", "x", "x"], "key1": ["a", "b", "c", "b", "a", "c"], @@ -820,7 +818,7 @@ def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex(): grouped = df.groupby(["key", "key1"], as_index=False) result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) - expected = pd.DataFrame( + expected = DataFrame( {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]} ) tm.assert_frame_equal(result, expected) @@ -832,10 +830,10 @@ def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex(): def test_multiindex_custom_func(func): # GH 31777 data = [[1, 4, 2], [5, 7, 1]] - df = pd.DataFrame(data, columns=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 3]])) + df = DataFrame(data, columns=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 3]])) result = df.groupby(np.array([0, 1])).agg(func) expected_dict = {(1, 3): {0: 1, 1: 5}, (1, 4): {0: 4, 1: 7}, (2, 3): {0: 2, 1: 1}} - expected = pd.DataFrame(expected_dict) + expected = DataFrame(expected_dict) tm.assert_frame_equal(result, expected) @@ -868,13 +866,13 @@ def test_lambda_named_agg(func): def test_aggregate_mixed_types(): # GH 16916 - df = pd.DataFrame( + df = DataFrame( data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc") ) df["grouping"] = ["group 1", "group 1", 2] result = df.groupby("grouping").aggregate(lambda x: x.tolist()) expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]] - expected = pd.DataFrame( + expected = DataFrame( expected_data, index=Index([2, "group 1"], dtype="object", name="grouping"), columns=Index(["X", "Y", "Z"], dtype="object"), @@ -897,9 +895,9 @@ def aggfunc(x): else: return pd.NA - df = pd.DataFrame({"A": pd.array([1, 2, 3])}) + df = DataFrame({"A": pd.array([1, 2, 3])}) result = df.groupby([1, 1, 2]).agg(aggfunc) - expected = pd.DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2]) + expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2]) tm.assert_frame_equal(result, expected) @@ -908,7 +906,7 @@ def test_groupby_aggregate_period_column(func): # GH 31471 groups = [1, 2] periods = pd.period_range("2020", periods=2, freq="Y") - df = pd.DataFrame({"a": groups, "b": periods}) + df = DataFrame({"a": groups, "b": periods}) result = getattr(df.groupby("a")["b"], func)() idx = pd.Int64Index([1, 2], name="a") @@ -922,21 +920,21 @@ def test_groupby_aggregate_period_frame(func): # GH 31471 groups = [1, 2] periods = pd.period_range("2020", periods=2, freq="Y") - df = pd.DataFrame({"a": groups, "b": periods}) + df = DataFrame({"a": groups, "b": periods}) result = getattr(df.groupby("a"), func)() idx = pd.Int64Index([1, 2], name="a") - expected = pd.DataFrame({"b": periods}, index=idx) + expected = DataFrame({"b": periods}, index=idx) tm.assert_frame_equal(result, expected) class TestLambdaMangling: def test_basic(self): - df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) + df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]}) - expected = pd.DataFrame( + expected = DataFrame( {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]}, index=pd.Index([0, 1], name="A"), ) @@ -945,7 +943,7 @@ def test_basic(self): def test_mangle_series_groupby(self): gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1]) result = gr.agg([lambda x: 0, lambda x: 1]) - expected = pd.DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}) + expected = 
DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}) tm.assert_frame_equal(result, expected) @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.") @@ -953,16 +951,16 @@ def test_with_kwargs(self): f1 = lambda x, y, b=1: x.sum() + y + b f2 = lambda x, y, b=2: x.sum() + y * b result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0) - expected = pd.DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]}) + expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]}) tm.assert_frame_equal(result, expected) result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10) - expected = pd.DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]}) + expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]}) tm.assert_frame_equal(result, expected) def test_agg_with_one_lambda(self): # GH 25719, write tests for DataFrameGroupby.agg with only one lambda - df = pd.DataFrame( + df = DataFrame( { "kind": ["cat", "dog", "cat", "dog"], "height": [9.1, 6.0, 9.5, 34.0], @@ -971,7 +969,7 @@ def test_agg_with_one_lambda(self): ) columns = ["height_sqr_min", "height_max", "weight_max"] - expected = pd.DataFrame( + expected = DataFrame( { "height_sqr_min": [82.81, 36.00], "height_max": [9.5, 34.0], @@ -1002,7 +1000,7 @@ def test_agg_with_one_lambda(self): def test_agg_multiple_lambda(self): # GH25719, test for DataFrameGroupby.agg with multiple lambdas # with mixed aggfunc - df = pd.DataFrame( + df = DataFrame( { "kind": ["cat", "dog", "cat", "dog"], "height": [9.1, 6.0, 9.5, 34.0], @@ -1016,7 +1014,7 @@ def test_agg_multiple_lambda(self): "height_max_2", "weight_min", ] - expected = pd.DataFrame( + expected = DataFrame( { "height_sqr_min": [82.81, 36.00], "height_max": [9.5, 34.0], @@ -1053,9 +1051,9 @@ def test_agg_multiple_lambda(self): def test_groupby_get_by_index(): # GH 33439 - df = pd.DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]}) + df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]}) res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])}) - expected = pd.DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A") + expected = DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A") pd.testing.assert_frame_equal(res, expected) @@ -1071,7 +1069,7 @@ def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data): # test single aggregations on ordered categorical cols GHGH27800 # create the result dataframe - input_df = pd.DataFrame( + input_df = DataFrame( { "nr": [1, 2, 3, 4, 5, 6, 7, 8], "cat_ord": list("aabbccdd"), @@ -1088,7 +1086,7 @@ def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data): ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category" ) - expected_df = pd.DataFrame(data=exp_data, index=cat_index) + expected_df = DataFrame(data=exp_data, index=cat_index) tm.assert_frame_equal(result_df, expected_df) @@ -1105,7 +1103,7 @@ def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data): # test combined aggregations on ordered categorical cols GH27800 # create the result dataframe - input_df = pd.DataFrame( + input_df = DataFrame( { "nr": [1, 2, 3, 4, 5, 6, 7, 8], "cat_ord": list("aabbccdd"), @@ -1133,7 +1131,7 @@ def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data): multi_index_list.append([k, v]) multi_index = pd.MultiIndex.from_tuples(tuple(multi_index_list)) - expected_df = pd.DataFrame(data=exp_data, columns=multi_index, index=cat_index) + expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index) tm.assert_frame_equal(result_df, expected_df) @@ -1141,7 +1139,7 @@ def 
test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data): def test_nonagg_agg(): # GH 35490 - Single/Multiple agg of non-agg function give same results # TODO: agg should raise for functions that don't aggregate - df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]}) + df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]}) g = df.groupby("a") result = g.agg(["cumsum"]) @@ -1153,9 +1151,9 @@ def test_nonagg_agg(): def test_agg_no_suffix_index(): # GH36189 - df = pd.DataFrame([[4, 9]] * 3, columns=["A", "B"]) + df = DataFrame([[4, 9]] * 3, columns=["A", "B"]) result = df.agg(["sum", lambda x: x.sum(), lambda x: x.sum()]) - expected = pd.DataFrame( + expected = DataFrame( {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"] ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 87ebd8b5a27fb..e01855c1b7761 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -176,7 +176,7 @@ def test__cython_agg_general(op, targop): ], ) def test_cython_agg_empty_buckets(op, targop, observed): - df = pd.DataFrame([11, 12, 13]) + df = DataFrame([11, 12, 13]) grps = range(0, 55, 5) # calling _cython_agg_general directly, instead of via the user API @@ -192,14 +192,14 @@ def test_cython_agg_empty_buckets(op, targop, observed): def test_cython_agg_empty_buckets_nanops(observed): # GH-18869 can't call nanops on empty groups, so hardcode expected # for these - df = pd.DataFrame([11, 12, 13], columns=["a"]) + df = DataFrame([11, 12, 13], columns=["a"]) grps = range(0, 25, 5) # add / sum result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( "add" ) intervals = pd.interval_range(0, 20, freq=5) - expected = pd.DataFrame( + expected = DataFrame( {"a": [0, 0, 36, 0]}, index=pd.CategoricalIndex(intervals, name="a", ordered=True), ) @@ -212,7 +212,7 @@ def test_cython_agg_empty_buckets_nanops(observed): result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( "prod" ) - expected = pd.DataFrame( + expected = DataFrame( {"a": [1, 1, 1716, 1]}, index=pd.CategoricalIndex(intervals, name="a", ordered=True), ) diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index a5f947cf656a0..15803d4b0ef94 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -143,7 +143,7 @@ def test_agg_cast_results_dtypes(): # xref #11444 u = [dt.datetime(2015, x + 1, 1) for x in range(12)] v = list("aaabbbbbbccd") - df = pd.DataFrame({"X": v, "Y": u}) + df = DataFrame({"X": v, "Y": u}) result = df.groupby("X")["Y"].agg(len) expected = df.groupby("X")["Y"].count() @@ -216,7 +216,7 @@ def test_aggregate_api_consistency(): def test_agg_dict_renaming_deprecation(): # 15931 - df = pd.DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)}) + df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)}) msg = r"nested renamer is not supported" with pytest.raises(SpecificationError, match=msg): @@ -414,7 +414,7 @@ def __call__(self, x): def test_agg_over_numpy_arrays(): # GH 3788 - df = pd.DataFrame( + df = DataFrame( [ [1, np.array([10, 20, 30])], [1, np.array([40, 50, 60])], @@ -427,9 +427,7 @@ def test_agg_over_numpy_arrays(): expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]] expected_index = pd.Index([1, 2], name="category") expected_column = ["arraydata"] - expected = 
pd.DataFrame( - expected_data, index=expected_index, columns=expected_column - ) + expected = DataFrame(expected_data, index=expected_index, columns=expected_column) tm.assert_frame_equal(result, expected) @@ -438,7 +436,7 @@ def test_agg_tzaware_non_datetime_result(): # discussed in GH#29589, fixed in GH#29641, operating on tzaware values # with function that is not dtype-preserving dti = pd.date_range("2012-01-01", periods=4, tz="UTC") - df = pd.DataFrame({"a": [0, 0, 1, 1], "b": dti}) + df = DataFrame({"a": [0, 0, 1, 1], "b": dti}) gb = df.groupby("a") # Case that _does_ preserve the dtype @@ -462,9 +460,7 @@ def test_agg_tzaware_non_datetime_result(): def test_agg_timezone_round_trip(): # GH 15426 ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific") - df = pd.DataFrame( - {"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]} - ) + df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]}) result1 = df.groupby("a")["b"].agg(np.min).iloc[0] result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0] @@ -477,7 +473,7 @@ def test_agg_timezone_round_trip(): dates = [ pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5) ] - df = pd.DataFrame({"A": ["a", "b"] * 2, "B": dates}) + df = DataFrame({"A": ["a", "b"] * 2, "B": dates}) grouped = df.groupby("A") ts = df["B"].iloc[0] @@ -498,13 +494,13 @@ def test_agg_timezone_round_trip(): def test_sum_uint64_overflow(): # see gh-14758 # Convert to uint64 and don't overflow - df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object) + df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object) df = df + 9223372036854775807 index = pd.Index( [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64 ) - expected = pd.DataFrame( + expected = DataFrame( {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]}, index=index, ) @@ -517,20 +513,20 @@ def test_sum_uint64_overflow(): @pytest.mark.parametrize( "structure, expected", [ - (tuple, pd.DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})), - (list, pd.DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})), + (tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})), + (list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})), ( lambda x: tuple(x), - pd.DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}), + DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}), ), ( lambda x: list(x), - pd.DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}), + DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}), ), ], ) def test_agg_structs_dataframe(structure, expected): - df = pd.DataFrame( + df = DataFrame( {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]} ) @@ -550,7 +546,7 @@ def test_agg_structs_dataframe(structure, expected): ) def test_agg_structs_series(structure, expected): # Issue #18079 - df = pd.DataFrame( + df = DataFrame( {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]} ) @@ -561,7 +557,7 @@ def test_agg_structs_series(structure, expected): def test_agg_category_nansum(observed): categories = ["a", "b", "c"] - df = pd.DataFrame( + df = DataFrame( {"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]} ) result = df.groupby("A", observed=observed).B.agg(np.nansum) @@ -577,12 +573,10 @@ def test_agg_category_nansum(observed): def test_agg_list_like_func(): # GH 18473 - df = pd.DataFrame( - {"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]} - ) + df = DataFrame({"A": [str(x) 
for x in range(3)], "B": [str(x) for x in range(3)]}) grouped = df.groupby("A", as_index=False, sort=False) result = grouped.agg({"B": lambda x: list(x)}) - expected = pd.DataFrame( + expected = DataFrame( {"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]} ) tm.assert_frame_equal(result, expected) @@ -590,7 +584,7 @@ def test_agg_list_like_func(): def test_agg_lambda_with_timezone(): # GH 23683 - df = pd.DataFrame( + df = DataFrame( { "tag": [1, 1], "date": [ @@ -600,7 +594,7 @@ def test_agg_lambda_with_timezone(): } ) result = df.groupby("tag").agg({"date": lambda e: e.head(1)}) - expected = pd.DataFrame( + expected = DataFrame( [pd.Timestamp("2018-01-01", tz="UTC")], index=pd.Index([1], name="tag"), columns=["date"], @@ -629,7 +623,7 @@ def test_groupby_agg_err_catching(err_cls): from pandas.tests.extension.decimal.array import DecimalArray, make_data, to_decimal data = make_data()[:5] - df = pd.DataFrame( + df = DataFrame( {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)} ) diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index feb758c82285d..ab44bd17d3f15 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -51,7 +51,7 @@ def test_apply_issues(): def test_apply_trivial(): # GH 20066 # trivial apply: ignore input and return a constant dataframe. - df = pd.DataFrame( + df = DataFrame( {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=["key", "data"], ) @@ -65,7 +65,7 @@ def test_apply_trivial(): def test_apply_trivial_fail(): # GH 20066 - df = pd.DataFrame( + df = DataFrame( {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=["key", "data"], ) @@ -189,7 +189,7 @@ def test_group_apply_once_per_group2(capsys): expected = 2 # Number of times `apply` should call a function for the current test - df = pd.DataFrame( + df = DataFrame( { "group_by_column": [0, 0, 0, 0, 1, 1, 1, 1], "test_column": ["0", "2", "4", "6", "8", "10", "12", "14"], @@ -241,7 +241,7 @@ def test_groupby_apply_identity_maybecopy_index_identical(func): # have an impact on the index structure of the result since this is not # transparent to the user - df = pd.DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}) + df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}) result = df.groupby("g").apply(func) tm.assert_frame_equal(result, df) @@ -538,7 +538,7 @@ def filt2(x): @pytest.mark.parametrize("test_series", [True, False]) def test_apply_with_duplicated_non_sorted_axis(test_series): # GH 30667 - df = pd.DataFrame( + df = DataFrame( [["x", "p"], ["x", "p"], ["x", "o"]], columns=["X", "Y"], index=[1, 2, 2] ) if test_series: @@ -565,9 +565,7 @@ def test_apply_reindex_values(): # solved in #30679 values = [1, 2, 3, 4] indices = [1, 1, 2, 2] - df = pd.DataFrame( - {"group": ["Group1", "Group2"] * 2, "value": values}, index=indices - ) + df = DataFrame({"group": ["Group1", "Group2"] * 2, "value": values}, index=indices) expected = Series(values, index=indices, name="value") def reindex_helper(x): @@ -608,7 +606,7 @@ def test_apply_numeric_coercion_when_datetime(): # for which are here. 
# GH 15670 - df = pd.DataFrame( + df = DataFrame( {"Number": [1, 2], "Date": ["2017-03-02"] * 2, "Str": ["foo", "inf"]} ) expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) @@ -617,7 +615,7 @@ def test_apply_numeric_coercion_when_datetime(): tm.assert_series_equal(result["Str"], expected["Str"]) # GH 15421 - df = pd.DataFrame( + df = DataFrame( {"A": [10, 20, 30], "B": ["foo", "3", "4"], "T": [pd.Timestamp("12:31:22")] * 3} ) @@ -639,7 +637,7 @@ def predictions(tool): out["useTime"] = str(tool[tool.State == "step2"].oTime.values[0]) return out - df1 = pd.DataFrame( + df1 = DataFrame( { "Key": ["B", "B", "A", "A"], "State": ["step1", "step2", "step1", "step2"], @@ -658,7 +656,7 @@ def test_apply_aggregating_timedelta_and_datetime(): # Regression test for GH 15562 # The following groupby caused ValueErrors and IndexErrors pre 0.20.0 - df = pd.DataFrame( + df = DataFrame( { "clientid": ["A", "B", "C"], "datetime": [np.datetime64("2017-02-01 00:00:00")] * 3, @@ -670,7 +668,7 @@ def test_apply_aggregating_timedelta_and_datetime(): dict(clientid_age=ddf.time_delta_zero.min(), date=ddf.datetime.min()) ) ) - expected = pd.DataFrame( + expected = DataFrame( { "clientid": ["A", "B", "C"], "clientid_age": [np.timedelta64(0, "D")] * 3, @@ -686,13 +684,13 @@ def test_apply_groupby_datetimeindex(): # groupby apply failed on dataframe with DatetimeIndex data = [["A", 10], ["B", 20], ["B", 30], ["C", 40], ["C", 50]] - df = pd.DataFrame( + df = DataFrame( data, columns=["Name", "Value"], index=pd.date_range("2020-09-01", "2020-09-05") ) result = df.groupby("Name").sum() - expected = pd.DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]}) + expected = DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]}) expected.set_index("Name", inplace=True) tm.assert_frame_equal(result, expected) @@ -704,7 +702,7 @@ def test_time_field_bug(): # that were not returned by the apply function, an exception would be # raised. 
- df = pd.DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]}) + df = DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]}) def func_with_no_date(batch): return Series({"c": 2}) @@ -713,13 +711,11 @@ def func_with_date(batch): return Series({"b": datetime(2015, 1, 1), "c": 2}) dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date) - dfg_no_conversion_expected = pd.DataFrame({"c": 2}, index=[1]) + dfg_no_conversion_expected = DataFrame({"c": 2}, index=[1]) dfg_no_conversion_expected.index.name = "a" dfg_conversion = df.groupby(by=["a"]).apply(func_with_date) - dfg_conversion_expected = pd.DataFrame( - {"b": datetime(2015, 1, 1), "c": 2}, index=[1] - ) + dfg_conversion_expected = DataFrame({"b": datetime(2015, 1, 1), "c": 2}, index=[1]) dfg_conversion_expected.index.name = "a" tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected) @@ -788,7 +784,7 @@ def test_func(x): def test_groupby_apply_return_empty_chunk(): # GH 22221: apply filter which returns some empty groups - df = pd.DataFrame(dict(value=[0, 1], group=["filled", "empty"])) + df = DataFrame(dict(value=[0, 1], group=["filled", "empty"])) groups = df.groupby("group") result = groups.apply(lambda group: group[group.value != 1]["value"]) expected = Series( @@ -803,11 +799,11 @@ def test_groupby_apply_return_empty_chunk(): def test_apply_with_mixed_types(): # gh-20949 - df = pd.DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]}) + df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]}) g = df.groupby("A") result = g.transform(lambda x: x / x.sum()) - expected = pd.DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]}) + expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]}) tm.assert_frame_equal(result, expected) result = g.apply(lambda x: x / x.sum()) @@ -835,10 +831,10 @@ def test_apply_datetime_issue(group_column_dtlike): # is a datetime object and the column labels are different from # standard int values in range(len(num_columns)) - df = pd.DataFrame({"a": ["foo"], "b": [group_column_dtlike]}) + df = DataFrame({"a": ["foo"], "b": [group_column_dtlike]}) result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42])) - expected = pd.DataFrame( + expected = DataFrame( ["spam"], Index(["foo"], dtype="object", name="a"), columns=[42] ) tm.assert_frame_equal(result, expected) @@ -891,11 +887,11 @@ def test_apply_multi_level_name(category): expected_index = pd.CategoricalIndex([1, 2], categories=[1, 2, 3], name="B") else: expected_index = pd.Index([1, 2], name="B") - df = pd.DataFrame( + df = DataFrame( {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))} ).set_index(["A", "B"]) result = df.groupby("B").apply(lambda x: x.sum()) - expected = pd.DataFrame({"C": [20, 25], "D": [20, 25]}, index=expected_index) + expected = DataFrame({"C": [20, 25], "D": [20, 25]}, index=expected_index) tm.assert_frame_equal(result, expected) assert df.index.names == ["A", "B"] @@ -953,7 +949,7 @@ def test_apply_index_has_complex_internals(index): ) def test_apply_function_returns_non_pandas_non_scalar(function, expected_values): # GH 31441 - df = pd.DataFrame(["A", "A", "B", "B"], columns=["groups"]) + df = DataFrame(["A", "A", "B", "B"], columns=["groups"]) result = df.groupby("groups").apply(function) expected = Series(expected_values, index=pd.Index(["A", "B"], name="groups")) tm.assert_series_equal(result, expected) @@ -964,7 +960,7 @@ def test_apply_function_returns_numpy_array(): def fct(group): return group["B"].values.flatten() 
- df = pd.DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]}) + df = DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]}) result = df.groupby("A").apply(fct) expected = Series( @@ -976,7 +972,7 @@ def fct(group): @pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1]) def test_apply_function_index_return(function): # GH: 22541 - df = pd.DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"]) + df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"]) result = df.groupby("id").apply(function) expected = Series( [pd.Index([0, 4, 7, 9]), pd.Index([1, 2, 3, 5]), pd.Index([6, 8])], @@ -987,9 +983,7 @@ def test_apply_function_index_return(function): def test_apply_function_with_indexing(): # GH: 33058 - df = pd.DataFrame( - {"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]} - ) + df = DataFrame({"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}) def fn(x): x.col2[x.index[-1]] = 0 @@ -1026,8 +1020,8 @@ def test_apply_with_timezones_aware(): dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2 index_no_tz = pd.DatetimeIndex(dates) index_tz = pd.DatetimeIndex(dates, tz="UTC") - df1 = pd.DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz}) - df2 = pd.DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz}) + df1 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz}) + df2 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz}) result1 = df1.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) @@ -1046,7 +1040,7 @@ def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func): } ) - expected = pd.DataFrame( + expected = DataFrame( {"a": [264, 297], "b": [15, 6], "c": [150, 60]}, index=pd.Index([88, 99], name="a"), ) @@ -1067,7 +1061,7 @@ def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func): def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp(): # GH 29617 - df = pd.DataFrame( + df = DataFrame( { "A": ["a", "a", "a", "b"], "B": [ @@ -1100,7 +1094,7 @@ def test_apply_by_cols_equals_apply_by_rows_transposed(): # should give the same result. 
There was previously a bug where the # by_rows operation would work fine, but by_cols would throw a ValueError - df = pd.DataFrame( + df = DataFrame( np.random.random([6, 4]), columns=pd.MultiIndex.from_product([["A", "B"], [1, 2]]), ) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index ab211845c1957..9785a95f3b6cb 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -471,7 +471,7 @@ def test_observed_nth(): # GH 26385 cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"]) ser = Series([1, 2, 3]) - df = pd.DataFrame({"cat": cat, "ser": ser}) + df = DataFrame({"cat": cat, "ser": ser}) result = df.groupby("cat", observed=False)["ser"].nth(0) @@ -768,10 +768,10 @@ def test_preserve_on_ordered_ops(func, values): # gh-18502 # preserve the categoricals on ops c = pd.Categorical(["first", "second", "third", "fourth"], ordered=True) - df = pd.DataFrame({"payload": [-1, -2, -1, -2], "col": c}) + df = DataFrame({"payload": [-1, -2, -1, -2], "col": c}) g = df.groupby("payload") result = getattr(g, func)() - expected = pd.DataFrame( + expected = DataFrame( {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)} ).set_index("payload") tm.assert_frame_equal(result, expected) @@ -818,9 +818,7 @@ def test_groupby_empty_with_category(): # GH-9614 # test fix for when group by on None resulted in # coercion of dtype categorical -> float - df = pd.DataFrame( - {"A": [None] * 3, "B": pd.Categorical(["train", "train", "test"])} - ) + df = DataFrame({"A": [None] * 3, "B": pd.Categorical(["train", "train", "test"])}) result = df.groupby("A").first()["B"] expected = Series( pd.Categorical([], categories=["test", "train"]), @@ -1280,10 +1278,10 @@ def test_groupby_cat_preserves_structure(observed, ordered): def test_get_nonexistent_category(): # Accessing a Category that is not in the dataframe - df = pd.DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)}) + df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)}) with pytest.raises(KeyError, match="'vau'"): df.groupby("var").apply( - lambda rows: pd.DataFrame( + lambda rows: DataFrame( {"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]} ) ) @@ -1300,7 +1298,7 @@ def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, r ) request.node.add_marker(mark) - df = pd.DataFrame( + df = DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABCD")), "cat_2": pd.Categorical(list("AB") * 2, categories=list("ABCD")), @@ -1333,7 +1331,7 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( ) request.node.add_marker(mark) - df = pd.DataFrame( + df = DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), "cat_2": pd.Categorical(list("AB") * 2, categories=list("ABC")), @@ -1369,7 +1367,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_fun if reduction_func == "ngroup": pytest.skip("ngroup does not return the Categories on the index") - df = pd.DataFrame( + df = DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), "cat_2": pd.Categorical(list("1111"), categories=list("12")), @@ -1399,7 +1397,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( if reduction_func == "ngroup": pytest.skip("ngroup does not return the Categories on the index") - df = pd.DataFrame( + df = DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), "cat_2": pd.Categorical(list("1111"), 
categories=list("12")), @@ -1424,7 +1422,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( def test_series_groupby_categorical_aggregation_getitem(): # GH 8870 d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]} - df = pd.DataFrame(d) + df = DataFrame(d) cat = pd.cut(df["foo"], np.linspace(0, 20, 5)) df["range"] = cat groups = df.groupby(["range", "baz"], as_index=True, sort=True) @@ -1439,7 +1437,7 @@ def test_series_groupby_categorical_aggregation_getitem(): ) def test_groupby_agg_categorical_columns(func, expected_values): # 31256 - df = pd.DataFrame( + df = DataFrame( { "id": [0, 1, 2, 3, 4], "groups": [0, 1, 1, 2, 2], @@ -1448,17 +1446,15 @@ def test_groupby_agg_categorical_columns(func, expected_values): ).set_index("id") result = df.groupby("groups").agg(func) - expected = pd.DataFrame( + expected = DataFrame( {"value": expected_values}, index=pd.Index([0, 1, 2], name="groups") ) tm.assert_frame_equal(result, expected) def test_groupby_agg_non_numeric(): - df = pd.DataFrame( - {"A": pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])} - ) - expected = pd.DataFrame({"A": [2, 1]}, index=[1, 2]) + df = DataFrame({"A": pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])}) + expected = DataFrame({"A": [2, 1]}, index=[1, 2]) result = df.groupby([1, 2, 1]).agg(pd.Series.nunique) tm.assert_frame_equal(result, expected) @@ -1471,9 +1467,7 @@ def test_groupby_agg_non_numeric(): def test_groupy_first_returned_categorical_instead_of_dataframe(func): # GH 28641: groupby drops index, when grouping over categorical column with # first/last. Renamed Categorical instead of DataFrame previously. - df = pd.DataFrame( - {"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()} - ) + df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()}) df_grouped = df.groupby("A")["B"] result = getattr(df_grouped, func)() expected = Series(["b"], index=pd.Index([1997], name="A"), name="B") @@ -1494,7 +1488,7 @@ def test_read_only_category_no_sort(): def test_sorted_missing_category_values(): # GH 28597 - df = pd.DataFrame( + df = DataFrame( { "foo": [ "small", @@ -1515,7 +1509,7 @@ def test_sorted_missing_category_values(): .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True) ) - expected = pd.DataFrame( + expected = DataFrame( { "tiny": {"A": 0, "C": 0}, "small": {"A": 0, "C": 1}, @@ -1539,7 +1533,7 @@ def test_sorted_missing_category_values(): def test_agg_cython_category_not_implemented_fallback(): # https://github.com/pandas-dev/pandas/issues/31450 - df = pd.DataFrame({"col_num": [1, 1, 2, 3]}) + df = DataFrame({"col_num": [1, 1, 2, 3]}) df["col_cat"] = df["col_num"].astype("category") result = df.groupby("col_num").col_cat.first() @@ -1557,15 +1551,15 @@ def test_agg_cython_category_not_implemented_fallback(): def test_aggregate_categorical_lost_index(func: str): # GH: 28641 groupby drops index, when grouping over categorical column with min/max ds = Series(["b"], dtype="category").cat.as_ordered() - df = pd.DataFrame({"A": [1997], "B": ds}) + df = DataFrame({"A": [1997], "B": ds}) result = df.groupby("A").agg({"B": func}) - expected = pd.DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A")) + expected = DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A")) tm.assert_frame_equal(result, expected) def test_aggregate_categorical_with_isnan(): # GH 29837 - df = pd.DataFrame( + df = DataFrame( { "A": [1, 1, 1, 1], "B": [1, 2, 1, 2], @@ -1579,7 +1573,7 @@ def 
test_aggregate_categorical_with_isnan(): result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum()) index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B")) - expected = pd.DataFrame( + expected = DataFrame( data={ "numerical_col": [1.0, 0.0], "object_col": [0, 0], @@ -1592,7 +1586,7 @@ def test_aggregate_categorical_with_isnan(): def test_categorical_transform(): # GH 29037 - df = pd.DataFrame( + df = DataFrame( { "package_id": [1, 1, 1, 2, 2, 3], "status": [ @@ -1613,7 +1607,7 @@ def test_categorical_transform(): df["last_status"] = df.groupby("package_id")["status"].transform(max) result = df.copy() - expected = pd.DataFrame( + expected = DataFrame( { "package_id": [1, 1, 1, 2, 2, 3], "status": [ @@ -1647,7 +1641,7 @@ def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals( # GH 34951 cat = pd.Categorical([0, 0, 1, 1]) val = [0, 1, 1, 0] - df = pd.DataFrame({"a": cat, "b": cat, "c": val}) + df = DataFrame({"a": cat, "b": cat, "c": val}) idx = pd.Categorical([0, 1]) idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"]) @@ -1672,7 +1666,7 @@ def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals( # GH 34951 cat = pd.Categorical([0, 0, 1, 1]) val = [0, 1, 1, 0] - df = pd.DataFrame({"a": cat, "b": cat, "c": val}) + df = DataFrame({"a": cat, "b": cat, "c": val}) idx = pd.Categorical([0, 1]) idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"]) diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index a5842dee2c43e..c03ed00e1a081 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -283,7 +283,7 @@ def test_count(): def test_count_non_nulls(): # GH#5610 # count counts non-nulls - df = pd.DataFrame( + df = DataFrame( [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]], columns=["A", "B", "C"], ) @@ -301,12 +301,12 @@ def test_count_non_nulls(): def test_count_object(): - df = pd.DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3}) + df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3}) result = df.groupby("c").a.count() expected = Series([3, 3], index=pd.Index([2, 3], name="c"), name="a") tm.assert_series_equal(result, expected) - df = pd.DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3}) + df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3}) result = df.groupby("c").a.count() expected = Series([1, 3], index=pd.Index([2, 3], name="c"), name="a") tm.assert_series_equal(result, expected) @@ -318,7 +318,7 @@ def test_count_cross_type(): (np.random.randint(0, 5, (100, 2)), np.random.randint(0, 2, (100, 2))) ) - df = pd.DataFrame(vals, columns=["a", "b", "c", "d"]) + df = DataFrame(vals, columns=["a", "b", "c", "d"]) df[df == 2] = np.nan expected = df.groupby(["c", "d"]).count() diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index ad2e61ad99389..448e6c6e6f64a 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -26,9 +26,9 @@ def test_filter_series(): def test_filter_single_column_df(): - df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7]) - expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6]) - expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5]) + df = DataFrame([1, 3, 20, 5, 22, 24, 7]) + expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6]) + expected_even = DataFrame([20, 22, 24], index=[2, 4, 5]) grouper = df[0].apply(lambda x: x % 2) grouped = 
df.groupby(grouper) tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd) @@ -45,20 +45,20 @@ def test_filter_single_column_df(): def test_filter_multi_column_df(): - df = pd.DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]}) + df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]}) grouper = df["A"].apply(lambda x: x % 2) grouped = df.groupby(grouper) - expected = pd.DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2]) + expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2]) tm.assert_frame_equal( grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected ) def test_filter_mixed_df(): - df = pd.DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) + df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) grouper = df["A"].apply(lambda x: x % 2) grouped = df.groupby(grouper) - expected = pd.DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2]) + expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2]) tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected) @@ -67,7 +67,7 @@ def test_filter_out_all_groups(): grouper = s.apply(lambda x: x % 2) grouped = s.groupby(grouper) tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]]) - df = pd.DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) + df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) grouper = df["A"].apply(lambda x: x % 2) grouped = df.groupby(grouper) tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]]) @@ -79,7 +79,7 @@ def test_filter_out_no_groups(): grouped = s.groupby(grouper) filtered = grouped.filter(lambda x: x.mean() > 0) tm.assert_series_equal(filtered, s) - df = pd.DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) + df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) grouper = df["A"].apply(lambda x: x % 2) grouped = df.groupby(grouper) filtered = grouped.filter(lambda x: x["A"].mean() > 0) @@ -88,16 +88,16 @@ def test_filter_out_no_groups(): def test_filter_out_all_groups_in_df(): # GH12768 - df = pd.DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]}) + df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]}) res = df.groupby("a") res = res.filter(lambda x: x["b"].sum() > 5, dropna=False) - expected = pd.DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3}) + expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3}) tm.assert_frame_equal(expected, res) - df = pd.DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]}) + df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]}) res = df.groupby("a") res = res.filter(lambda x: x["b"].sum() > 5, dropna=True) - expected = pd.DataFrame({"a": [], "b": []}, dtype="int64") + expected = DataFrame({"a": [], "b": []}, dtype="int64") tm.assert_frame_equal(expected, res) @@ -119,7 +119,7 @@ def raise_if_sum_is_zero(x): def test_filter_with_axis_in_groupby(): # issue 11041 index = pd.MultiIndex.from_product([range(10), [0, 1]]) - data = pd.DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64") + data = DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64") result = data.groupby(level=0, axis=1).filter(lambda x: x.iloc[0, 0] > 10) expected = data.iloc[:, 12:20] tm.assert_frame_equal(result, expected) @@ -551,7 +551,7 @@ def test_filter_has_access_to_grouped_cols(): def test_filter_enforces_scalarness(): - df = pd.DataFrame( + df = DataFrame( [ ["best", "a", "x"], ["worst", "b", "y"], @@ -568,7 +568,7 @@ def test_filter_enforces_scalarness(): def test_filter_non_bool_raises(): - df = pd.DataFrame( + df = DataFrame( [ ["best", 
"a", 1], ["worst", "b", 1], diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 7a309db143758..6d760035246c7 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -90,7 +90,7 @@ def test_min_date_with_nans(): dates = pd.to_datetime( Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d" ).dt.date - df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates}) + df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates}) result = df.groupby("b", as_index=False)["c"].min()["c"] expected = pd.to_datetime( @@ -122,7 +122,7 @@ def test_intercept_builtin_sum(): @pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key def test_builtins_apply(keys, f): # see gh-8155 - df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"]) + df = DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"]) df["jolie"] = np.random.randn(1000) fname = f.__name__ @@ -151,7 +151,7 @@ def test_arg_passthru(): # GH3668 # GH5724 - df = pd.DataFrame( + df = DataFrame( { "group": [1, 1, 2], "int": [1, 2, 3], @@ -179,7 +179,7 @@ def test_arg_passthru(): expected_columns_numeric = Index(["int", "float", "category_int"]) # mean / median - expected = pd.DataFrame( + expected = DataFrame( { "category_int": [7.5, 9], "float": [4.5, 6.0], @@ -308,7 +308,7 @@ def test_non_cython_api(): levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]], codes=[[0] * 8, list(range(8))], ) - expected = pd.DataFrame( + expected = DataFrame( [ [1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0], [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], @@ -385,7 +385,7 @@ def test_cython_median(): def test_median_empty_bins(observed): - df = pd.DataFrame(np.random.randint(0, 44, 500)) + df = DataFrame(np.random.randint(0, 44, 500)) grps = range(0, 55, 5) bins = pd.cut(df[0], grps) @@ -411,7 +411,7 @@ def test_median_empty_bins(observed): ) def test_groupby_non_arithmetic_agg_types(dtype, method, data): # GH9311, GH6620 - df = pd.DataFrame( + df = DataFrame( [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}] ) @@ -426,7 +426,7 @@ def test_groupby_non_arithmetic_agg_types(dtype, method, data): out_type = dtype exp = data["df"] - df_out = pd.DataFrame(exp) + df_out = DataFrame(exp) df_out["b"] = df_out.b.astype(out_type) df_out.set_index("a", inplace=True) @@ -448,7 +448,7 @@ def test_groupby_non_arithmetic_agg_types(dtype, method, data): ) def test_groupby_non_arithmetic_agg_int_like_precision(i): # see gh-6620, gh-9311 - df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}]) + df = DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}]) grp_exp = { "first": {"expected": i[0]}, @@ -478,7 +478,7 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i): ) def test_idxmin_idxmax_returns_int_types(func, values): # GH 25444 - df = pd.DataFrame( + df = DataFrame( { "name": ["A", "A", "B", "B"], "c_int": [1, 2, 3, 4], @@ -490,21 +490,21 @@ def test_idxmin_idxmax_returns_int_types(func, values): result = getattr(df.groupby("name"), func)() - expected = pd.DataFrame(values, index=Index(["A", "B"], name="name")) + expected = DataFrame(values, index=Index(["A", "B"], name="name")) tm.assert_frame_equal(result, expected) def test_groupby_cumprod(): # GH 4095 - df = pd.DataFrame({"key": ["b"] * 10, "value": 2}) + df = DataFrame({"key": ["b"] * 10, "value": 2}) actual = df.groupby("key")["value"].cumprod() expected = 
df.groupby("key")["value"].apply(lambda x: x.cumprod()) expected.name = "value" tm.assert_series_equal(actual, expected) - df = pd.DataFrame({"key": ["b"] * 100, "value": 2}) + df = DataFrame({"key": ["b"] * 100, "value": 2}) actual = df.groupby("key")["value"].cumprod() # if overflows, groupby product casts to float # while numpy passes back invalid values @@ -648,7 +648,7 @@ def test_nsmallest(): @pytest.mark.parametrize("func", ["cumprod", "cumsum"]) def test_numpy_compat(func): # see gh-12811 - df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) + df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) g = df.groupby("A") msg = "numpy operations are not valid with groupby" @@ -664,14 +664,12 @@ def test_cummin(numpy_dtypes_for_minmax): min_val = numpy_dtypes_for_minmax[1] # GH 15048 - base_df = pd.DataFrame( - {"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]} - ) + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] df = base_df.astype(dtype) - expected = pd.DataFrame({"B": expected_mins}).astype(dtype) + expected = DataFrame({"B": expected_mins}).astype(dtype) result = df.groupby("A").cummin() tm.assert_frame_equal(result, expected) result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame() @@ -687,30 +685,30 @@ def test_cummin(numpy_dtypes_for_minmax): # Test nan in some values base_df.loc[[0, 2, 4, 6], "B"] = np.nan - expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]}) + expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]}) result = base_df.groupby("A").cummin() tm.assert_frame_equal(result, expected) expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame() tm.assert_frame_equal(result, expected) # GH 15561 - df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"]))) + df = DataFrame(dict(a=[1], b=pd.to_datetime(["2001"]))) expected = Series(pd.to_datetime("2001"), index=[0], name="b") result = df.groupby("a")["b"].cummin() tm.assert_series_equal(expected, result) # GH 15635 - df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2])) + df = DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2])) result = df.groupby("a").b.cummin() expected = Series([1, 2, 1], name="b") tm.assert_series_equal(result, expected) def test_cummin_all_nan_column(): - base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) - expected = pd.DataFrame({"B": [np.nan] * 8}) + expected = DataFrame({"B": [np.nan] * 8}) result = base_df.groupby("A").cummin() tm.assert_frame_equal(expected, result) result = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame() @@ -722,14 +720,12 @@ def test_cummax(numpy_dtypes_for_minmax): max_val = numpy_dtypes_for_minmax[2] # GH 15048 - base_df = pd.DataFrame( - {"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]} - ) + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] df = base_df.astype(dtype) - expected = pd.DataFrame({"B": expected_maxs}).astype(dtype) + expected = DataFrame({"B": expected_maxs}).astype(dtype) result = df.groupby("A").cummax() tm.assert_frame_equal(result, expected) result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame() @@ -745,30 +741,30 @@ def test_cummax(numpy_dtypes_for_minmax): # Test nan in some values base_df.loc[[0, 2, 4, 6], "B"] = np.nan - expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]}) + expected = 
DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]}) result = base_df.groupby("A").cummax() tm.assert_frame_equal(result, expected) expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame() tm.assert_frame_equal(result, expected) # GH 15561 - df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"]))) + df = DataFrame(dict(a=[1], b=pd.to_datetime(["2001"]))) expected = Series(pd.to_datetime("2001"), index=[0], name="b") result = df.groupby("a")["b"].cummax() tm.assert_series_equal(expected, result) # GH 15635 - df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1])) + df = DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1])) result = df.groupby("a").b.cummax() expected = Series([2, 1, 2], name="b") tm.assert_series_equal(result, expected) def test_cummax_all_nan_column(): - base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) - expected = pd.DataFrame({"B": [np.nan] * 8}) + expected = DataFrame({"B": [np.nan] * 8}) result = base_df.groupby("A").cummax() tm.assert_frame_equal(expected, result) result = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame() @@ -800,7 +796,7 @@ def test_is_monotonic_increasing(in_vals, out_vals): "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], "C": in_vals, } - df = pd.DataFrame(source_dict) + df = DataFrame(source_dict) result = df.groupby("B").C.is_monotonic_increasing index = Index(list("abcd"), name="B") expected = Series(index=index, data=out_vals, name="C") @@ -837,7 +833,7 @@ def test_is_monotonic_decreasing(in_vals, out_vals): "C": in_vals, } - df = pd.DataFrame(source_dict) + df = DataFrame(source_dict) result = df.groupby("B").C.is_monotonic_decreasing index = Index(list("abcd"), name="B") expected = Series(index=index, data=out_vals, name="C") @@ -887,7 +883,7 @@ def test_frame_describe_multikey(tsframe): levels=[[col], group.columns], codes=[[0] * len(group.columns), range(len(group.columns))], ) - group = pd.DataFrame(group.values, columns=group_col, index=group.index) + group = DataFrame(group.values, columns=group_col, index=group.index) desc_groups.append(group) expected = pd.concat(desc_groups, axis=1) tm.assert_frame_equal(result, expected) @@ -929,13 +925,13 @@ def test_frame_describe_unstacked_format(): pd.Timestamp("2011-01-06 12:43:33", tz=None): 5000000000, pd.Timestamp("2011-01-06 12:54:09", tz=None): 100000000, } - df = pd.DataFrame({"PRICE": prices, "VOLUME": volumes}) + df = DataFrame({"PRICE": prices, "VOLUME": volumes}) result = df.groupby("PRICE").VOLUME.describe() data = [ df[df.PRICE == 24990].VOLUME.describe().values.tolist(), df[df.PRICE == 25499].VOLUME.describe().values.tolist(), ] - expected = pd.DataFrame( + expected = DataFrame( data, index=pd.Index([24990, 25499], name="PRICE"), columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], @@ -951,7 +947,7 @@ def test_frame_describe_unstacked_format(): @pytest.mark.parametrize("as_index", [True, False]) def test_describe_with_duplicate_output_column_names(as_index): # GH 35314 - df = pd.DataFrame( + df = DataFrame( { "a": [99, 99, 99, 88, 88, 88], "b": [1, 2, 3, 4, 5, 6], @@ -1007,7 +1003,7 @@ def test_describe_with_duplicate_output_column_names(as_index): def test_groupby_mean_no_overflow(): # Regression test for (#22487) - df = pd.DataFrame( + df = DataFrame( { "user": ["A", "A", "A", "A", "A"], "connections": [4970, 4749, 4719, 4704, 18446744073699999744], @@ -1032,9 +1028,9 @@ def test_apply_to_nullable_integer_returns_float(values, 
function): output = 0.5 if function == "var" else 1.5 arr = np.array([output] * 3, dtype=float) idx = pd.Index([1, 2, 3], dtype=object, name="a") - expected = pd.DataFrame({"b": arr}, index=idx) + expected = DataFrame({"b": arr}, index=idx) - groups = pd.DataFrame(values, dtype="Int64").groupby("a") + groups = DataFrame(values, dtype="Int64").groupby("a") result = getattr(groups, function)() tm.assert_frame_equal(result, expected) @@ -1049,7 +1045,7 @@ def test_apply_to_nullable_integer_returns_float(values, function): def test_groupby_sum_below_mincount_nullable_integer(): # https://github.com/pandas-dev/pandas/issues/32861 - df = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64") + df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64") grouped = df.groupby("a") idx = pd.Index([0, 1, 2], dtype=object, name="a") @@ -1058,7 +1054,5 @@ def test_groupby_sum_below_mincount_nullable_integer(): tm.assert_series_equal(result, expected) result = grouped.sum(min_count=2) - expected = pd.DataFrame( - {"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx - ) + expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index a1c00eb5f38f5..1c8c7cbaa68c5 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -248,7 +248,7 @@ def test_len(): assert len(grouped) == expected # issue 11016 - df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3])) + df = DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3])) assert len(df.groupby("a")) == 0 assert len(df.groupby("b")) == 3 assert len(df.groupby(["a", "b"])) == 3 @@ -594,7 +594,7 @@ def test_groupby_multiple_columns(df, op): def test_as_index_select_column(): # GH 5764 - df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) + df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) result = df.groupby("A", as_index=False)["B"].get_group(1) expected = Series([2, 4], name="B") tm.assert_series_equal(result, expected) @@ -1186,7 +1186,7 @@ def test_groupby_dtype_inference_empty(): def test_groupby_unit64_float_conversion(): # GH: 30859 groupby converts uint64 to floats sometimes - df = pd.DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]}) + df = DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]}) result = df.groupby(["first", "second"])["value"].max() expected = Series( [16148277970000000000], @@ -1217,7 +1217,7 @@ def test_groupby_keys_same_size_as_index(): index = pd.date_range( start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq ) - df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index) + df = DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index) result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean() expected = df.set_index([df.index, "metric"]) @@ -1227,17 +1227,17 @@ def test_groupby_one_row(): # GH 11741 msg = r"^'Z'$" - df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD")) + df1 = DataFrame(np.random.randn(1, 4), columns=list("ABCD")) with pytest.raises(KeyError, match=msg): df1.groupby("Z") - df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD")) + df2 = DataFrame(np.random.randn(2, 4), columns=list("ABCD")) with pytest.raises(KeyError, match=msg): df2.groupby("Z") def test_groupby_nat_exclude(): # GH 6992 -
df = pd.DataFrame( + df = DataFrame( { "values": np.random.randn(8), "dt": [ @@ -1454,7 +1454,7 @@ def foo(x): def test_group_name_available_in_inference_pass(): # gh-15062 - df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)}) + df = DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)}) names = [] @@ -1733,7 +1733,7 @@ def test_group_shift_lose_timezone(): def test_pivot_table_values_key_error(): # This test is designed to replicate the error in issue #14938 - df = pd.DataFrame( + df = DataFrame( { "eventDate": pd.date_range(datetime.today(), periods=20, freq="M").tolist(), "thename": range(0, 20), @@ -1762,7 +1762,7 @@ def test_empty_dataframe_groupby(): def test_tuple_as_grouping(): # https://github.com/pandas-dev/pandas/issues/18314 - df = pd.DataFrame( + df = DataFrame( { ("a", "b"): [1, 1, 1, 1], "a": [2, 2, 2, 2], @@ -1781,7 +1781,7 @@ def test_tuple_as_grouping(): def test_tuple_correct_keyerror(): # https://github.com/pandas-dev/pandas/issues/18798 - df = pd.DataFrame( + df = DataFrame( 1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]]) ) with pytest.raises(KeyError, match=r"^\(7, 8\)$"): @@ -1790,13 +1790,13 @@ def test_tuple_correct_keyerror(): def test_groupby_agg_ohlc_non_first(): # GH 21716 - df = pd.DataFrame( + df = DataFrame( [[1], [1]], columns=["foo"], index=pd.date_range("2018-01-01", periods=2, freq="D"), ) - expected = pd.DataFrame( + expected = DataFrame( [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], columns=pd.MultiIndex.from_tuples( ( @@ -1860,7 +1860,7 @@ def test_groupby_groups_in_BaseGrouper(): # GH 26326 # Test if DataFrame grouped with a pandas.Grouper has correct groups mi = pd.MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"]) - df = pd.DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi) + df = DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi) result = df.groupby([pd.Grouper(level="alpha"), "beta"]) expected = df.groupby(["alpha", "beta"]) assert result.groups == expected.groups @@ -1873,7 +1873,7 @@ def test_groupby_groups_in_BaseGrouper(): @pytest.mark.parametrize("group_name", ["x", ["x"]]) def test_groupby_axis_1(group_name): # GH 27614 - df = pd.DataFrame( + df = DataFrame( np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20] ) df.index.name = "y" @@ -1886,7 +1886,7 @@ def test_groupby_axis_1(group_name): # test on MI column iterables = [["bar", "baz", "foo"], ["one", "two"]] mi = pd.MultiIndex.from_product(iterables=iterables, names=["x", "x1"]) - df = pd.DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi) + df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi) results = df.groupby(group_name, axis=1).sum() expected = df.T.groupby(group_name).sum().T tm.assert_frame_equal(results, expected) @@ -1961,7 +1961,7 @@ def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected): def test_groupby_only_none_group(): # see GH21624 # this was crashing with "ValueError: Length of passed values is 1, index implies 0" - df = pd.DataFrame({"g": [None], "x": 1}) + df = DataFrame({"g": [None], "x": 1}) actual = df.groupby("g")["x"].transform("sum") expected = Series([np.nan], name="x") @@ -1981,7 +1981,7 @@ def test_groupby_duplicate_index(): @pytest.mark.parametrize("bool_agg_func", ["any", "all"]) def test_bool_aggs_dup_column_labels(bool_agg_func): # 21668 - df = pd.DataFrame([[True, True]], columns=["a", "a"]) + df = DataFrame([[True, True]], columns=["a", "a"]) grp_by = df.groupby([0]) result = getattr(grp_by, bool_agg_func)() @@ -1997,7 
+1997,7 @@ def test_dup_labels_output_shape(groupby_func, idx): if groupby_func in {"size", "ngroup", "cumcount"}: pytest.skip("Not applicable") - df = pd.DataFrame([[1, 1]], columns=idx) + df = DataFrame([[1, 1]], columns=idx) grp_by = df.groupby([0]) args = [] @@ -2017,7 +2017,7 @@ def test_dup_labels_output_shape(groupby_func, idx): def test_groupby_crash_on_nunique(axis): # Fix following 30253 - df = pd.DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]}) + df = DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]}) axis_number = df._get_axis_number(axis) if not axis_number: @@ -2025,7 +2025,7 @@ def test_groupby_crash_on_nunique(axis): result = df.groupby(axis=axis_number, level=0).nunique() - expected = pd.DataFrame({"A": [1, 2], "D": [1, 1]}) + expected = DataFrame({"A": [1, 2], "D": [1, 1]}) if not axis_number: expected = expected.T @@ -2034,7 +2034,7 @@ def test_groupby_crash_on_nunique(axis): def test_groupby_list_level(): # GH 9790 - expected = pd.DataFrame(np.arange(0, 9).reshape(3, 3)) + expected = DataFrame(np.arange(0, 9).reshape(3, 3)) result = expected.groupby(level=[0]).mean() tm.assert_frame_equal(result, expected) @@ -2048,7 +2048,7 @@ def test_groupby_list_level(): ) def test_groups_repr_truncates(max_seq_items, expected): # GH 1135 - df = pd.DataFrame(np.random.randn(5, 1)) + df = DataFrame(np.random.randn(5, 1)) df["a"] = df.index with pd.option_context("display.max_seq_items", max_seq_items): @@ -2061,7 +2061,7 @@ def test_groups_repr_truncates(max_seq_items, expected): def test_group_on_two_row_multiindex_returns_one_tuple_key(): # GH 18451 - df = pd.DataFrame([{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}]) + df = DataFrame([{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}]) df = df.set_index(["a", "b"]) grp = df.groupby(["a", "b"]) @@ -2106,7 +2106,7 @@ def test_group_on_two_row_multiindex_returns_one_tuple_key(): ) def test_subsetting_columns_keeps_attrs(klass, attr, value): # GH 9959 - When subsetting columns, don't drop attributes - df = pd.DataFrame({"a": [1], "b": [2], "c": [3]}) + df = DataFrame({"a": [1], "b": [2], "c": [3]}) if attr != "axis": df = df.set_index("a") @@ -2119,7 +2119,7 @@ def test_subsetting_columns_keeps_attrs(klass, attr, value): def test_groupby_column_index_name_lost(func): # GH: 29764 groupby loses index sometimes expected = pd.Index(["a"], name="idx") - df = pd.DataFrame([[1]], columns=expected) + df = DataFrame([[1]], columns=expected) df_grouped = df.groupby([1]) result = getattr(df_grouped, func)().columns tm.assert_index_equal(result, expected) @@ -2127,10 +2127,10 @@ def test_groupby_column_index_name_lost(func): def test_groupby_duplicate_columns(): # GH: 31735 - df = pd.DataFrame( + df = DataFrame( {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]} ).astype(object) df.columns = ["A", "B", "B"] result = df.groupby([0, 0, 0, 0]).min() - expected = pd.DataFrame([["e", "a", 1]], columns=["A", "B", "B"]) + expected = DataFrame([["e", "a", 1]], columns=["A", "B", "B"]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 3b3967b858adf..48859db305e46 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -158,7 +158,7 @@ def test_grouper_multilevel_freq(self): d0 = date.today() - timedelta(days=14) dates = date_range(d0, date.today()) date_index = pd.MultiIndex.from_product([dates, dates], names=["foo", "bar"]) - df = 
pd.DataFrame(np.random.randint(0, 100, 225), index=date_index) + df = DataFrame(np.random.randint(0, 100, 225), index=date_index) # Check string level expected = ( @@ -258,7 +258,7 @@ def test_grouper_column_and_index(self): [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)] ) idx.names = ["outer", "inner"] - df_multi = pd.DataFrame( + df_multi = DataFrame( {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]}, index=idx, ) @@ -289,7 +289,7 @@ def test_groupby_levels_and_columns(self): idx = pd.MultiIndex.from_tuples( [(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names ) - df = pd.DataFrame(np.arange(12).reshape(-1, 3), index=idx) + df = DataFrame(np.arange(12).reshape(-1, 3), index=idx) by_levels = df.groupby(level=idx_names).mean() # reset_index changes columns dtype to object @@ -407,7 +407,7 @@ def test_multiindex_passthru(self): # GH 7997 # regression from 0.14.1 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) df.columns = pd.MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)]) result = df.groupby(axis=1, level=[0, 1]).first() @@ -463,7 +463,7 @@ def test_multiindex_columns_empty_level(self): def test_groupby_multiindex_tuple(self): # GH 17979 - df = pd.DataFrame( + df = DataFrame( [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]], columns=pd.MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]), ) @@ -471,7 +471,7 @@ def test_groupby_multiindex_tuple(self): result = df.groupby(("b", 1)).groups tm.assert_dict_equal(expected, result) - df2 = pd.DataFrame( + df2 = DataFrame( df.values, columns=pd.MultiIndex.from_arrays( [["a", "b", "b", "c"], ["d", "d", "e", "e"]] @@ -481,7 +481,7 @@ def test_groupby_multiindex_tuple(self): result = df.groupby(("b", 1)).groups tm.assert_dict_equal(expected, result) - df3 = pd.DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"]) + df3 = DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"]) expected = df3.groupby([("b", "d")]).groups result = df.groupby(("b", 1)).groups tm.assert_dict_equal(expected, result) @@ -596,7 +596,7 @@ def test_grouping_labels(self, mframe): def test_list_grouper_with_nat(self): # GH 14715 - df = pd.DataFrame({"date": pd.date_range("1/1/2011", periods=365, freq="D")}) + df = DataFrame({"date": pd.date_range("1/1/2011", periods=365, freq="D")}) df.iloc[-1] = pd.NaT grouper = pd.Grouper(key="date", freq="AS") @@ -632,7 +632,7 @@ def test_evaluate_with_empty_groups(self, func, expected): # test transform'ing empty groups # (not testing other agg fns, because they return # different index objects. 
- df = pd.DataFrame({1: [], 2: []}) + df = DataFrame({1: [], 2: []}) g = df.groupby(1) result = getattr(g[2], func)(lambda x: x) tm.assert_series_equal(result, expected) @@ -680,13 +680,13 @@ def test_groupby_level_index_value_all_na(self): def test_groupby_multiindex_level_empty(self): # https://github.com/pandas-dev/pandas/issues/31670 - df = pd.DataFrame( + df = DataFrame( [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"] ) df = df.set_index(["id", "category"]) empty = df[df.value < 0] result = empty.groupby("id").sum() - expected = pd.DataFrame( + expected = DataFrame( dtype="float64", columns=["value"], index=pd.Int64Index([], name="id") ) tm.assert_frame_equal(result, expected) @@ -746,7 +746,7 @@ def test_get_group(self): def test_get_group_empty_bins(self, observed): - d = pd.DataFrame([3, 1, 7, 6]) + d = DataFrame([3, 1, 7, 6]) bins = [0, 5, 10, 15] g = d.groupby(pd.cut(d[0], bins), observed=observed) @@ -784,10 +784,10 @@ def test_groupby_with_empty(self): assert next(iter(grouped), None) is None def test_groupby_with_single_column(self): - df = pd.DataFrame({"a": list("abssbab")}) + df = DataFrame({"a": list("abssbab")}) tm.assert_frame_equal(df.groupby("a").get_group("a"), df.iloc[[0, 5]]) # GH 13530 - exp = pd.DataFrame(index=pd.Index(["a", "b", "s"], name="a")) + exp = DataFrame(index=pd.Index(["a", "b", "s"], name="a")) tm.assert_frame_equal(df.groupby("a").count(), exp) tm.assert_frame_equal(df.groupby("a").sum(), exp) tm.assert_frame_equal(df.groupby("a").nth(1), exp) @@ -796,7 +796,7 @@ def test_gb_key_len_equal_axis_len(self): # GH16843 # test ensures that index and column keys are recognized correctly # when number of keys equals axis length of groupby - df = pd.DataFrame( + df = DataFrame( [["foo", "bar", "B", 1], ["foo", "bar", "B", 2], ["foo", "baz", "C", 3]], columns=["first", "second", "third", "one"], ) @@ -905,7 +905,7 @@ def test_dictify(self, df): def test_groupby_with_small_elem(self): # GH 8542 # length=2 - df = pd.DataFrame( + df = DataFrame( {"event": ["start", "start"], "change": [1234, 5678]}, index=pd.DatetimeIndex(["2014-09-10", "2013-10-10"]), ) @@ -920,7 +920,7 @@ def test_groupby_with_small_elem(self): res = grouped.get_group((pd.Timestamp("2013-10-31"), "start")) tm.assert_frame_equal(res, df.iloc[[1], :]) - df = pd.DataFrame( + df = DataFrame( {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]}, index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-09-15"]), ) @@ -936,7 +936,7 @@ def test_groupby_with_small_elem(self): tm.assert_frame_equal(res, df.iloc[[1], :]) # length=3 - df = pd.DataFrame( + df = DataFrame( {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]}, index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-08-05"]), ) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 7dd37163021ed..fe35f6f5d9416 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -57,9 +57,7 @@ def test_first_last_nth(df): @pytest.mark.parametrize("method", ["first", "last"]) def test_first_last_with_na_object(method, nulls_fixture): # https://github.com/pandas-dev/pandas/issues/32123 - groups = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby( - "a" - ) + groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a") result = getattr(groups, method)() if method == "first": @@ -69,7 +67,7 @@ def test_first_last_with_na_object(method, nulls_fixture): values = np.array(values, 
dtype=result["b"].dtype) idx = pd.Index([1, 2], name="a") - expected = pd.DataFrame({"b": values}, index=idx) + expected = DataFrame({"b": values}, index=idx) tm.assert_frame_equal(result, expected) @@ -77,9 +75,7 @@ def test_first_last_with_na_object(method, nulls_fixture): @pytest.mark.parametrize("index", [0, -1]) def test_nth_with_na_object(index, nulls_fixture): # https://github.com/pandas-dev/pandas/issues/32123 - groups = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby( - "a" - ) + groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a") result = groups.nth(index) if index == 0: @@ -89,7 +85,7 @@ def test_nth_with_na_object(index, nulls_fixture): values = np.array(values, dtype=result["b"].dtype) idx = pd.Index([1, 2], name="a") - expected = pd.DataFrame({"b": values}, index=idx) + expected = DataFrame({"b": values}, index=idx) tm.assert_frame_equal(result, expected) @@ -142,7 +138,7 @@ def test_first_last_nth_dtypes(df_mixed_floats): def test_first_last_nth_nan_dtype(): # GH 33591 - df = pd.DataFrame({"data": ["A"], "nans": Series([np.nan], dtype=object)}) + df = DataFrame({"data": ["A"], "nans": Series([np.nan], dtype=object)}) grouped = df.groupby("data") expected = df.set_index("data").nans @@ -154,7 +150,7 @@ def test_first_last_nth_nan_dtype(): def test_first_strings_timestamps(): # GH 11244 - test = pd.DataFrame( + test = DataFrame( { pd.Timestamp("2012-01-01 00:00:00"): ["a", "b"], pd.Timestamp("2012-01-02 00:00:00"): ["c", "d"], @@ -387,7 +383,7 @@ def test_first_last_tz(data, expected_first, expected_last): def test_first_last_tz_multi_column(method, ts, alpha): # GH 21603 category_string = Series(list("abc")).astype("category") - df = pd.DataFrame( + df = DataFrame( { "group": [1, 1, 2], "category_string": category_string, @@ -395,7 +391,7 @@ def test_first_last_tz_multi_column(method, ts, alpha): } ) result = getattr(df.groupby("group"), method)() - expected = pd.DataFrame( + expected = DataFrame( { "category_string": pd.Categorical( [alpha, "c"], dtype=category_string.dtype @@ -614,7 +610,7 @@ def test_nth_nan_in_grouper(dropna): columns=list("abc"), ) result = df.groupby("a").nth(0, dropna=dropna) - expected = pd.DataFrame( + expected = DataFrame( [[2, 3], [6, 7]], columns=list("bc"), index=Index(["abc", "def"], name="a") ) diff --git a/pandas/tests/groupby/test_nunique.py b/pandas/tests/groupby/test_nunique.py index 8e37ac1a1a21d..7edb358170b50 100644 --- a/pandas/tests/groupby/test_nunique.py +++ b/pandas/tests/groupby/test_nunique.py @@ -83,7 +83,7 @@ def test_nunique(): def test_nunique_with_object(): # GH 11077 - data = pd.DataFrame( + data = DataFrame( [ [100, 1, "Alice"], [200, 2, "Bob"], @@ -110,7 +110,7 @@ def test_nunique_with_empty_series(): def test_nunique_with_timegrouper(): # GH 13453 - test = pd.DataFrame( + test = DataFrame( { "time": [ Timestamp("2016-06-28 09:35:35"), @@ -156,22 +156,22 @@ def test_nunique_with_timegrouper(): ) def test_nunique_with_NaT(key, data, dropna, expected): # GH 27951 - df = pd.DataFrame({"key": key, "data": data}) + df = DataFrame({"key": key, "data": data}) result = df.groupby(["key"])["data"].nunique(dropna=dropna) tm.assert_series_equal(result, expected) def test_nunique_preserves_column_level_names(): # GH 23222 - test = pd.DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0")) + test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0")) result = test.groupby([0, 0, 0]).nunique() - expected = pd.DataFrame([2], columns=test.columns) + expected = 
DataFrame([2], columns=test.columns) tm.assert_frame_equal(result, expected) def test_nunique_transform_with_datetime(): # GH 35109 - transform with nunique on datetimes results in integers - df = pd.DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) + df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) result = df.groupby([0, 0, 1])["date"].transform("nunique") expected = Series([2, 2, 1], name="date") tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_pipe.py b/pandas/tests/groupby/test_pipe.py index d2ab016f608fa..6812ac6ce8f34 100644 --- a/pandas/tests/groupby/test_pipe.py +++ b/pandas/tests/groupby/test_pipe.py @@ -42,7 +42,7 @@ def test_pipe_args(): # Test passing args to the pipe method of DataFrameGroupBy. # Issue #17871 - df = pd.DataFrame( + df = DataFrame( { "group": ["A", "A", "B", "B", "C"], "x": [1.0, 2.0, 3.0, 2.0, 5.0], diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py index 9338742195bfe..14b0d9ab60e52 100644 --- a/pandas/tests/groupby/test_quantile.py +++ b/pandas/tests/groupby/test_quantile.py @@ -54,18 +54,18 @@ def test_quantile(interpolation, a_vals, b_vals, q): def test_quantile_array(): # https://github.com/pandas-dev/pandas/issues/27526 - df = pd.DataFrame({"A": [0, 1, 2, 3, 4]}) + df = DataFrame({"A": [0, 1, 2, 3, 4]}) result = df.groupby([0, 0, 1, 1, 1]).quantile([0.25]) index = pd.MultiIndex.from_product([[0, 1], [0.25]]) - expected = pd.DataFrame({"A": [0.25, 2.50]}, index=index) + expected = DataFrame({"A": [0.25, 2.50]}, index=index) tm.assert_frame_equal(result, expected) - df = pd.DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]}) + df = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]}) index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]]) result = df.groupby([0, 0, 1, 1]).quantile([0.25, 0.75]) - expected = pd.DataFrame( + expected = DataFrame( {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index ) tm.assert_frame_equal(result, expected) @@ -73,11 +73,11 @@ def test_quantile_array(): def test_quantile_array2(): # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959 - df = pd.DataFrame( + df = DataFrame( np.random.RandomState(0).randint(0, 5, size=(10, 3)), columns=list("ABC") ) result = df.groupby("A").quantile([0.3, 0.7]) - expected = pd.DataFrame( + expected = DataFrame( { "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0], "C": [1.2, 2.8, 1.8, 3.0, 0.0, 0.0, 1.9, 3.1, 3.0, 3.0], @@ -90,16 +90,16 @@ def test_quantile_array2(): def test_quantile_array_no_sort(): - df = pd.DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]}) + df = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]}) result = df.groupby([1, 0, 1], sort=False).quantile([0.25, 0.5, 0.75]) - expected = pd.DataFrame( + expected = DataFrame( {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]}, index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]), ) tm.assert_frame_equal(result, expected) result = df.groupby([1, 0, 1], sort=False).quantile([0.75, 0.25]) - expected = pd.DataFrame( + expected = DataFrame( {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]}, index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]), ) @@ -107,7 +107,7 @@ def test_quantile_array_no_sort(): def test_quantile_array_multiple_levels(): - df = pd.DataFrame( + df = DataFrame( {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]} ) result = df.groupby(["c", "d"]).quantile([0.25, 0.75]) @@ -115,7 +115,7 @@ def 
test_quantile_array_multiple_levels(): [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)], names=["c", "d", None], ) - expected = pd.DataFrame( + expected = DataFrame( {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index ) tm.assert_frame_equal(result, expected) @@ -127,9 +127,7 @@ def test_quantile_array_multiple_levels(): def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q): # GH30289 nrow, ncol = frame_size - df = pd.DataFrame( - np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol) - ) + df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol)) idx_levels = [list(range(min(nrow, 4)))] * len(groupby) + [q] idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [ @@ -142,7 +140,7 @@ def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, [float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q ] expected_columns = [x for x in range(ncol) if x not in groupby] - expected = pd.DataFrame( + expected = DataFrame( expected_values, index=expected_index, columns=expected_columns ) result = df.groupby(groupby).quantile(q) @@ -151,9 +149,7 @@ def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, def test_quantile_raises(): - df = pd.DataFrame( - [["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"] - ) + df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"]) with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"): df.groupby("key").quantile() @@ -161,7 +157,7 @@ def test_quantile_raises(): def test_quantile_out_of_bounds_q_raises(): # https://github.com/pandas-dev/pandas/issues/27470 - df = pd.DataFrame(dict(a=[0, 0, 0, 1, 1, 1], b=range(6))) + df = DataFrame(dict(a=[0, 0, 0, 1, 1, 1], b=range(6))) g = df.groupby([0, 0, 0, 1, 1, 1]) with pytest.raises(ValueError, match="Got '50.0' instead"): g.quantile(50) @@ -173,7 +169,7 @@ def test_quantile_out_of_bounds_q_raises(): def test_quantile_missing_group_values_no_segfaults(): # GH 28662 data = np.array([1.0, np.nan, 1.0]) - df = pd.DataFrame(dict(key=data, val=range(3))) + df = DataFrame(dict(key=data, val=range(3))) # Random segfaults; would have been guaranteed in loop grp = df.groupby("key") @@ -195,9 +191,9 @@ def test_quantile_missing_group_values_correct_results( key, val, expected_key, expected_val ): # GH 28662, GH 33200, GH 33569 - df = pd.DataFrame({"key": key, "val": val}) + df = DataFrame({"key": key, "val": val}) - expected = pd.DataFrame( + expected = DataFrame( expected_val, index=pd.Index(expected_key, name="key"), columns=["val"] ) @@ -220,7 +216,7 @@ def test_quantile_missing_group_values_correct_results( @pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]]) def test_groupby_quantile_nullable_array(values, q): # https://github.com/pandas-dev/pandas/issues/33136 - df = pd.DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values}) + df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values}) result = df.groupby("a")["b"].quantile(q) if isinstance(q, list): @@ -236,7 +232,7 @@ def test_groupby_quantile_nullable_array(values, q): @pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]]) def test_groupby_quantile_skips_invalid_dtype(q): - df = pd.DataFrame({"a": [1], "b": [2.0], "c": ["x"]}) + df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]}) result = df.groupby("a").quantile(q) expected = df.groupby("a")[["b"]].quantile(q) tm.assert_frame_equal(result, expected) diff --git 
a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 4693fe360c819..0a1232d3f24da 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -72,7 +72,7 @@ def test_groupby_with_timegrouper_methods(self, should_sort): # GH 3881 # make sure API of timegrouper conforms - df = pd.DataFrame( + df = DataFrame( { "Branch": "A A A A A B".split(), "Buyer": "Carl Mark Carl Joe Joe Carl".split(), @@ -403,7 +403,7 @@ def test_timegrouper_apply_return_type_series(self): # Using `apply` with the `TimeGrouper` should give the # same return type as an `apply` with a `Grouper`. # Issue #11742 - df = pd.DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]}) + df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]}) df_dt = df.copy() df_dt["date"] = pd.to_datetime(df_dt["date"]) @@ -420,7 +420,7 @@ def test_timegrouper_apply_return_type_value(self): # Using `apply` with the `TimeGrouper` should give the # same return type as an `apply` with a `Grouper`. # Issue #11742 - df = pd.DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]}) + df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]}) df_dt = df.copy() df_dt["date"] = pd.to_datetime(df_dt["date"]) @@ -448,7 +448,7 @@ def test_groupby_groups_datetimeindex(self): # GH#11442 index = pd.date_range("2015/01/01", periods=5, name="date") - df = pd.DataFrame({"A": [5, 6, 7, 8, 9], "B": [1, 2, 3, 4, 5]}, index=index) + df = DataFrame({"A": [5, 6, 7, 8, 9], "B": [1, 2, 3, 4, 5]}, index=index) result = df.groupby(level="date").groups dates = ["2015-01-05", "2015-01-04", "2015-01-03", "2015-01-02", "2015-01-01"] expected = { @@ -461,7 +461,7 @@ def test_groupby_groups_datetimeindex(self): result = grouped.get_group(date) data = [[df.loc[date, "A"], df.loc[date, "B"]]] expected_index = pd.DatetimeIndex([date], name="date", freq="D") - expected = pd.DataFrame(data, columns=list("AB"), index=expected_index) + expected = DataFrame(data, columns=list("AB"), index=expected_index) tm.assert_frame_equal(result, expected) def test_groupby_groups_datetimeindex_tz(self): @@ -671,7 +671,7 @@ def test_groupby_with_timezone_selection(self): # GH 11616 # Test that column selection returns output in correct timezone. 
np.random.seed(42) - df = pd.DataFrame( + df = DataFrame( { "factor": np.random.randint(0, 3, size=60), "time": pd.date_range( @@ -687,9 +687,9 @@ def test_timezone_info(self): # see gh-11682: Timezone info lost when broadcasting # scalar datetime to DataFrame - df = pd.DataFrame({"a": [1], "b": [datetime.now(pytz.utc)]}) + df = DataFrame({"a": [1], "b": [datetime.now(pytz.utc)]}) assert df["b"][0].tzinfo == pytz.utc - df = pd.DataFrame({"a": [1, 2, 3]}) + df = DataFrame({"a": [1, 2, 3]}) df["b"] = datetime.now(pytz.utc) assert df["b"][0].tzinfo == pytz.utc @@ -733,7 +733,7 @@ def test_first_last_max_min_on_time_data(self): def test_nunique_with_timegrouper_and_nat(self): # GH 17575 - test = pd.DataFrame( + test = DataFrame( { "time": [ Timestamp("2016-06-28 09:35:35"), @@ -760,7 +760,7 @@ def test_scalar_call_versus_list_call(self): ), "value": [1, 2, 3], } - data_frame = pd.DataFrame(data_frame).set_index("time") + data_frame = DataFrame(data_frame).set_index("time") grouper = pd.Grouper(freq="D") grouped = data_frame.groupby(grouper) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 4b79701a57acd..946e60d17e0bb 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -96,7 +96,7 @@ def test_transform_fast(): tm.assert_series_equal(result, expected) # GH 12737 - df = pd.DataFrame( + df = DataFrame( { "grouping": [0, 1, 1, 3], "f": [1.1, 2.1, 3.1, 4.5], @@ -113,7 +113,7 @@ def test_transform_fast(): pd.Timestamp("2014-1-2"), pd.Timestamp("2014-1-4"), ] - expected = pd.DataFrame( + expected = DataFrame( {"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]}, columns=["f", "i", "d"], ) @@ -125,7 +125,7 @@ def test_transform_fast(): tm.assert_frame_equal(result, expected) # dup columns - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"]) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"]) result = df.groupby("g").transform("first") expected = df.drop("g", axis=1) tm.assert_frame_equal(result, expected) @@ -223,11 +223,11 @@ def test_transform_numeric_to_boolean(): # inconsistency in transforming boolean values expected = Series([True, True], name="A") - df = pd.DataFrame({"A": [1.1, 2.2], "B": [1, 2]}) + df = DataFrame({"A": [1.1, 2.2], "B": [1, 2]}) result = df.groupby("B").A.transform(lambda x: True) tm.assert_series_equal(result, expected) - df = pd.DataFrame({"A": [1, 2], "B": [1, 2]}) + df = DataFrame({"A": [1, 2], "B": [1, 2]}) result = df.groupby("B").A.transform(lambda x: True) tm.assert_series_equal(result, expected) @@ -389,7 +389,7 @@ def test_transform_function_aliases(df): def test_series_fast_transform_date(): # GH 13191 - df = pd.DataFrame( + df = DataFrame( {"grouping": [np.nan, 1, 1, 3], "d": pd.date_range("2014-1-1", "2014-1-4")} ) result = df.groupby("grouping")["d"].transform("first") @@ -405,7 +405,7 @@ def test_series_fast_transform_date(): def test_transform_length(): # GH 9697 - df = pd.DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]}) + df = DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]}) expected = Series([3.0] * 4) def nsum(x): @@ -426,7 +426,7 @@ def test_transform_coercion(): # 14457 # when we are transforming be sure to not coerce # via assignment - df = pd.DataFrame(dict(A=["a", "a"], B=[0, 1])) + df = DataFrame(dict(A=["a", "a"], B=[0, 1])) g = df.groupby("A") expected = g.transform(np.mean) @@ -482,7 +482,7 @@ def test_groupby_transform_with_int(): def 
test_groupby_transform_with_nan_group(): # GH 9941 - df = pd.DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]}) + df = DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]}) result = df.groupby(df.b)["a"].transform(max) expected = Series([1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a") tm.assert_series_equal(result, expected) @@ -663,7 +663,7 @@ def test_cython_transform_series(op, args, targop): ], ) def test_groupby_cum_skipna(op, skipna, input, exp): - df = pd.DataFrame(input) + df = DataFrame(input) result = df.groupby("key")["value"].transform(op, skipna=skipna) if isinstance(exp, dict): expected = exp[(op, skipna)] @@ -778,7 +778,7 @@ def test_transform_with_non_scalar_group(): ("non", "G"), ] ) - df = pd.DataFrame( + df = DataFrame( np.random.randint(1, 10, (4, 12)), columns=cols, index=["A", "C", "G", "T"] ) @@ -793,7 +793,7 @@ def test_transform_with_non_scalar_group(): ("a", Series([1, 1, 1], name="a"), tm.assert_series_equal), ( ["a", "c"], - pd.DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}), + DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}), tm.assert_frame_equal, ), ], @@ -807,7 +807,7 @@ def test_transform_numeric_ret(cols, exp, comp_func, agg_func, request): request.node.add_marker(pytest.mark.xfail(reason=reason)) # GH 19200 - df = pd.DataFrame( + df = DataFrame( {"a": pd.date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)} ) @@ -890,7 +890,7 @@ def test_pad_stable_sorting(fill_method): if fill_method == "bfill": y = y[::-1] - df = pd.DataFrame({"x": x, "y": y}) + df = DataFrame({"x": x, "y": y}) expected = df.drop("x", 1) result = getattr(df.groupby("x"), fill_method)() @@ -978,7 +978,7 @@ def test_ffill_bfill_non_unique_multilevel(func, expected_status): @pytest.mark.parametrize("func", [np.any, np.all]) def test_any_all_np_func(func): # GH 20653 - df = pd.DataFrame( + df = DataFrame( [["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"] ) @@ -1000,8 +1000,8 @@ def demean_rename(x): return result - df = pd.DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]}) - expected = pd.DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]}) + df = DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]}) + expected = DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]}) result = df.groupby("group").transform(demean_rename) tm.assert_frame_equal(result, expected) @@ -1013,9 +1013,9 @@ def demean_rename(x): def test_groupby_transform_timezone_column(func): # GH 24198 ts = pd.to_datetime("now", utc=True).tz_convert("Asia/Singapore") - result = pd.DataFrame({"end_time": [ts], "id": [1]}) + result = DataFrame({"end_time": [ts], "id": [1]}) result["max_end_time"] = result.groupby("id").end_time.transform(func) - expected = pd.DataFrame([[ts, 1, ts]], columns=["end_time", "id", "max_end_time"]) + expected = DataFrame([[ts, 1, ts]], columns=["end_time", "id", "max_end_time"]) tm.assert_frame_equal(result, expected) @@ -1030,7 +1030,7 @@ def test_groupby_transform_with_datetimes(func, values): # GH 15306 dates = pd.date_range("1/1/2011", periods=10, freq="D") - stocks = pd.DataFrame({"price": np.arange(10.0)}, index=dates) + stocks = DataFrame({"price": np.arange(10.0)}, index=dates) stocks["week_id"] = dates.isocalendar().week result = stocks.groupby(stocks["week_id"])["price"].transform(func) @@ -1057,7 +1057,7 @@ def test_transform_absent_categories(func): @pytest.mark.parametrize("key, val", [("level", 0), ("by", Series([0]))]) def test_ffill_not_in_axis(func, key, val): # GH 21521 - 
df = pd.DataFrame([[np.nan]]) + df = DataFrame([[np.nan]]) result = getattr(df.groupby(**{key: val}), func)() expected = df @@ -1143,7 +1143,7 @@ def test_transform_fastpath_raises(): # GH#29631 case where fastpath defined in groupby.generic _choose_path # raises, but slow_path does not - df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]}) + df = DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]}) gb = df.groupby("A") def func(grp): @@ -1165,13 +1165,13 @@ def func(grp): result = gb.transform(func) - expected = pd.DataFrame([2, -2, 2, 4], columns=["B"]) + expected = DataFrame([2, -2, 2, 4], columns=["B"]) tm.assert_frame_equal(result, expected) def test_transform_lambda_indexing(): # GH 7883 - df = pd.DataFrame( + df = DataFrame( { "A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"], "B": ["one", "one", "two", "three", "two", "six", "five", "three"], @@ -1211,14 +1211,14 @@ def test_categorical_and_not_categorical_key(observed): # and a non-categorical key, doesn't try to expand the output to include # non-observed categories but instead matches the input shape. # GH 32494 - df_with_categorical = pd.DataFrame( + df_with_categorical = DataFrame( { "A": pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"]), "B": [1, 2, 3], "C": ["a", "b", "a"], } ) - df_without_categorical = pd.DataFrame( + df_without_categorical = DataFrame( {"A": ["a", "b", "a"], "B": [1, 2, 3], "C": ["a", "b", "a"]} ) @@ -1226,7 +1226,7 @@ def test_categorical_and_not_categorical_key(observed): result = df_with_categorical.groupby(["A", "C"], observed=observed).transform("sum") expected = df_without_categorical.groupby(["A", "C"]).transform("sum") tm.assert_frame_equal(result, expected) - expected_explicit = pd.DataFrame({"B": [4, 2, 4]}) + expected_explicit = DataFrame({"B": [4, 2, 4]}) tm.assert_frame_equal(result, expected_explicit) # Series case diff --git a/pandas/tests/indexes/multi/test_conversion.py b/pandas/tests/indexes/multi/test_conversion.py index 3519c5d0d5a9a..c80548783d148 100644 --- a/pandas/tests/indexes/multi/test_conversion.py +++ b/pandas/tests/indexes/multi/test_conversion.py @@ -98,7 +98,7 @@ def test_to_frame_dtype_fidelity(): ) original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} - expected_df = pd.DataFrame( + expected_df = DataFrame( { "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"), "a": [1, 1, 1, 2, 2, 2], diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py index a1e5cc33ef2f6..3d7e6e9c32248 100644 --- a/pandas/tests/indexes/multi/test_sorting.py +++ b/pandas/tests/indexes/multi/test_sorting.py @@ -98,7 +98,7 @@ def test_unsortedindex(): [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], names=["one", "two"], ) - df = pd.DataFrame([[i, 10 * i] for i in range(6)], index=mi, columns=["one", "two"]) + df = DataFrame([[i, 10 * i] for i in range(6)], index=mi, columns=["one", "two"]) # GH 16734: not sorted, but no real slicing result = df.loc(axis=0)["z", "a"] diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 1f3f59d038ce9..df59d09edd3ef 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -131,9 +131,9 @@ def test_mi_intervalindex_slicing_with_scalar(self): ) idx.names = ["Item", "RID", "MP"] - df = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]}) + df = DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]}) df.index = idx - query_df = 
pd.DataFrame( + query_df = DataFrame( { "Item": ["FC", "OWNER", "FC", "OWNER", "OWNER"], "RID": ["RID1", "RID1", "RID1", "RID2", "RID2"], diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 6072400d06a36..03046f51d668a 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -448,7 +448,7 @@ def test_loc_period_string_indexing(): a = pd.period_range("2013Q1", "2013Q4", freq="Q") i = (1111, 2222, 3333) idx = pd.MultiIndex.from_product((a, i), names=("Periode", "CVR")) - df = pd.DataFrame( + df = DataFrame( index=idx, columns=( "OMS", @@ -478,7 +478,7 @@ def test_loc_datetime_mask_slicing(): # GH 16699 dt_idx = pd.to_datetime(["2017-05-04", "2017-05-05"]) m_idx = pd.MultiIndex.from_product([dt_idx, dt_idx], names=["Idx1", "Idx2"]) - df = pd.DataFrame( + df = DataFrame( data=[[1, 2], [3, 4], [5, 6], [7, 6]], index=m_idx, columns=["C1", "C2"] ) result = df.loc[(dt_idx[0], (df.index.get_level_values(1) > "2017-05-04")), "C1"] @@ -554,7 +554,7 @@ def test_3levels_leading_period_index(): class TestKeyErrorsWithMultiIndex: def test_missing_keys_raises_keyerror(self): # GH#27420 KeyError, not TypeError - df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=["A", "B", "C"]) + df = DataFrame(np.arange(12).reshape(4, 3), columns=["A", "B", "C"]) df2 = df.set_index(["A", "B"]) with pytest.raises(KeyError, match="1"): diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py index 4565d79c632de..2e97dec789c5b 100644 --- a/pandas/tests/indexing/multiindex/test_multiindex.py +++ b/pandas/tests/indexing/multiindex/test_multiindex.py @@ -73,9 +73,9 @@ def test_nested_tuples_duplicates(self): idx = pd.Index(["a", "a", "c"]) mi = pd.MultiIndex.from_arrays([dti, idx], names=["index1", "index2"]) - df = pd.DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi) + df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi) - expected = pd.DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi) + expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi) df2 = df.copy(deep=True) df2.loc[(dti[0], "a"), "c2"] = 1.0 diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index 853b92ea91274..b58b81d5aa1b3 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -429,9 +429,9 @@ def test_setitem_nonmonotonic(self): index = pd.MultiIndex.from_tuples( [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"] ) - df = pd.DataFrame(data=[0, 1, 2], index=index, columns=["e"]) + df = DataFrame(data=[0, 1, 2], index=index, columns=["e"]) df.loc["a", "e"] = np.arange(99, 101, dtype="int64") - expected = pd.DataFrame({"e": [99, 1, 100]}, index=index) + expected = DataFrame({"e": [99, 1, 100]}, index=index) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index c1b41c6f5d8cf..024cc3ad72688 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -496,7 +496,7 @@ def test_loc_axis_arguments(self): def test_loc_axis_single_level_multi_col_indexing_multiindex_col_df(self): # GH29519 - df = pd.DataFrame( + df = DataFrame( np.arange(27).reshape(3, 9), columns=pd.MultiIndex.from_product( [["a1", "a2", "a3"], ["b1", "b2", "b3"]] @@ 
-510,7 +510,7 @@ def test_loc_axis_single_level_multi_col_indexing_multiindex_col_df(self): def test_loc_axis_single_level_single_col_indexing_multiindex_col_df(self): # GH29519 - df = pd.DataFrame( + df = DataFrame( np.arange(27).reshape(3, 9), columns=pd.MultiIndex.from_product( [["a1", "a2", "a3"], ["b1", "b2", "b3"]] @@ -526,7 +526,7 @@ def test_loc_ax_single_level_indexer_simple_df(self): # GH29519 # test single level indexing on single index column data frame - df = pd.DataFrame(np.arange(9).reshape(3, 3), columns=["a", "b", "c"]) + df = DataFrame(np.arange(9).reshape(3, 3), columns=["a", "b", "c"]) result = df.loc(axis=1)["a"] expected = Series(np.array([0, 3, 6]), name="a") tm.assert_series_equal(result, expected) @@ -736,11 +736,11 @@ def test_non_reducing_slice_on_multiindex(self): ("b", "c"): [3, 2], ("b", "d"): [4, 1], } - df = pd.DataFrame(dic, index=[0, 1]) + df = DataFrame(dic, index=[0, 1]) idx = pd.IndexSlice slice_ = idx[:, idx["b", "d"]] tslice_ = non_reducing_slice(slice_) result = df.loc[tslice_] - expected = pd.DataFrame({("b", "d"): [4, 1]}) + expected = DataFrame({("b", "d"): [4, 1]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 1241d394d7936..d162468235767 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -83,7 +83,7 @@ def test_setitem_cache_updating(self): def test_altering_series_clears_parent_cache(self): # GH #33675 - df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) ser = df["A"] assert "A" in df._item_cache @@ -350,14 +350,12 @@ def test_detect_chained_assignment_warnings_errors(self): def test_detect_chained_assignment_warnings_filter_and_dupe_cols(self): # xref gh-13017. 
with option_context("chained_assignment", "warn"): - df = pd.DataFrame( - [[1, 2, 3], [4, 5, 6], [7, 8, -9]], columns=["a", "a", "c"] - ) + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, -9]], columns=["a", "a", "c"]) with tm.assert_produces_warning(com.SettingWithCopyWarning): df.c.loc[df.c > 0] = None - expected = pd.DataFrame( + expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, -9]], columns=["a", "a", "c"] ) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index f5e6aea5f8db8..5e00056c33db7 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -334,9 +334,9 @@ def test_loc_setitem_with_existing_dst(self): end = pd.Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid") ts = pd.Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid") idx = pd.date_range(start, end, closed="left", freq="H") - result = pd.DataFrame(index=idx, columns=["value"]) + result = DataFrame(index=idx, columns=["value"]) result.loc[ts, "value"] = 12 - expected = pd.DataFrame( + expected = DataFrame( [np.nan] * len(idx) + [12], index=idx.append(pd.DatetimeIndex([ts])), columns=["value"], diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index f94f1d6aa453f..31abe45215432 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -195,7 +195,7 @@ def test_iloc_array_not_mutating_negative_indices(self): # GH 21867 array_with_neg_numbers = np.array([1, 2, -1]) array_copy = array_with_neg_numbers.copy() - df = pd.DataFrame( + df = DataFrame( {"A": [100, 101, 102], "B": [103, 104, 105], "C": [106, 107, 108]}, index=[1, 2, 3], ) @@ -372,7 +372,7 @@ def test_iloc_setitem_dups(self): def test_iloc_setitem_frame_duplicate_columns_multiple_blocks(self): # Same as the "assign back to self" check in test_iloc_setitem_dups # but on a DataFrame with multiple blocks - df = pd.DataFrame([[0, 1], [2, 3]], columns=["B", "B"]) + df = DataFrame([[0, 1], [2, 3]], columns=["B", "B"]) df.iloc[:, 0] = df.iloc[:, 0].astype("f8") assert len(df._mgr.blocks) == 2 @@ -562,7 +562,7 @@ def test_iloc_setitem_with_scalar_index(self, indexer, value): # assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated # elementwisely, not using "setter('A', ['Z'])". 
- df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) df.iloc[0, indexer] = value result = df.iloc[0, 0] @@ -712,7 +712,7 @@ def test_series_indexing_zerodim_np_array(self): def test_iloc_setitem_categorical_updates_inplace(self): # Mixed dtype ensures we go through take_split_path in setitem_with_indexer cat = pd.Categorical(["A", "B", "C"]) - df = pd.DataFrame({1: cat, 2: [1, 2, 3]}) + df = DataFrame({1: cat, 2: [1, 2, 3]}) # This should modify our original values in-place df.iloc[:, 0] = cat[::-1] @@ -743,8 +743,8 @@ def test_iloc_with_boolean_operation(self): class TestILocSetItemDuplicateColumns: def test_iloc_setitem_scalar_duplicate_columns(self): # GH#15686, duplicate columns and mixed dtype - df1 = pd.DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) - df2 = pd.DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) + df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) + df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) df = pd.concat([df1, df2], axis=1) df.iloc[0, 0] = -1 @@ -754,15 +754,15 @@ def test_iloc_setitem_scalar_duplicate_columns(self): def test_iloc_setitem_list_duplicate_columns(self): # GH#22036 setting with same-sized list - df = pd.DataFrame([[0, "str", "str2"]], columns=["a", "b", "b"]) + df = DataFrame([[0, "str", "str2"]], columns=["a", "b", "b"]) df.iloc[:, 2] = ["str3"] - expected = pd.DataFrame([[0, "str", "str3"]], columns=["a", "b", "b"]) + expected = DataFrame([[0, "str", "str3"]], columns=["a", "b", "b"]) tm.assert_frame_equal(df, expected) def test_iloc_setitem_series_duplicate_columns(self): - df = pd.DataFrame( + df = DataFrame( np.arange(8, dtype=np.int64).reshape(2, 4), columns=["A", "B", "A", "B"] ) df.iloc[:, 0] = df.iloc[:, 0].astype(np.float64) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index b4ea92fae1136..79834dc36ce7d 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -991,7 +991,7 @@ def test_none_coercion_mixed_dtypes(self): def test_extension_array_cross_section(): # A cross-section of a homogeneous EA should be an EA - df = pd.DataFrame( + df = DataFrame( { "A": pd.core.arrays.integer_array([1, 2]), "B": pd.core.arrays.integer_array([3, 4]), @@ -1008,7 +1008,7 @@ def test_extension_array_cross_section(): def test_extension_array_cross_section_converts(): # all numeric columns -> numeric series - df = pd.DataFrame( + df = DataFrame( {"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"] ) result = df.loc["a"] @@ -1019,7 +1019,7 @@ def test_extension_array_cross_section_converts(): tm.assert_series_equal(result, expected) # mixed columns -> object series - df = pd.DataFrame( + df = DataFrame( {"A": pd.array([1, 2], dtype="Int64"), "B": np.array(["a", "b"])}, index=["a", "b"], ) @@ -1033,7 +1033,7 @@ def test_extension_array_cross_section_converts(): def test_readonly_indices(): # GH#17192 iloc with read-only array raising TypeError - df = pd.DataFrame({"data": np.ones(100, dtype="float64")}) + df = DataFrame({"data": np.ones(100, dtype="float64")}) indices = np.array([1, 3, 6]) indices.flags.writeable = False @@ -1109,9 +1109,9 @@ def test_long_text_missing_labels_inside_loc_error_message_limited(): def test_setitem_categorical(): # https://github.com/pandas-dev/pandas/issues/35369 - df = pd.DataFrame({"h": Series(list("mn")).astype("category")}) + df = DataFrame({"h": Series(list("mn")).astype("category")}) df.h = df.h.cat.reorder_categories(["n", "m"]) - 
expected = pd.DataFrame( + expected = DataFrame( {"h": pd.Categorical(["m", "n"]).reorder_categories(["n", "m"])} ) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index e7f2ad6e8d735..5c5692b777360 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -632,7 +632,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): # assigning like "df.loc[0, ['A']] = ['Z']" should be evaluated # elementwisely, not using "setter('A', ['Z'])". - df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) df.loc[0, indexer] = value result = df.loc[0, "A"] @@ -644,7 +644,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): ( ([0, 2], ["A", "B", "C", "D"]), 7, - pd.DataFrame( + DataFrame( [[7, 7, 7, 7], [3, 4, np.nan, np.nan], [7, 7, 7, 7]], columns=["A", "B", "C", "D"], ), @@ -652,7 +652,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): ( (1, ["C", "D"]), [7, 8], - pd.DataFrame( + DataFrame( [[1, 2, np.nan, np.nan], [3, 4, 7, 8], [5, 6, np.nan, np.nan]], columns=["A", "B", "C", "D"], ), @@ -660,14 +660,14 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): ( (1, ["A", "B", "C"]), np.array([7, 8, 9], dtype=np.int64), - pd.DataFrame( + DataFrame( [[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], columns=["A", "B", "C"] ), ), ( (slice(1, 3, None), ["B", "C", "D"]), [[7, 8, 9], [10, 11, 12]], - pd.DataFrame( + DataFrame( [[1, 2, np.nan, np.nan], [3, 7, 8, 9], [5, 10, 11, 12]], columns=["A", "B", "C", "D"], ), @@ -675,15 +675,15 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): ( (slice(1, 3, None), ["C", "A", "D"]), np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int64), - pd.DataFrame( + DataFrame( [[1, 2, np.nan, np.nan], [8, 4, 7, 9], [11, 6, 10, 12]], columns=["A", "B", "C", "D"], ), ), ( (slice(None, None, None), ["A", "C"]), - pd.DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]), - pd.DataFrame( + DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]), + DataFrame( [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"] ), ), @@ -691,7 +691,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): ) def test_loc_setitem_missing_columns(self, index, box, expected): # GH 29334 - df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"]) df.loc[index] = box tm.assert_frame_equal(df, expected) @@ -1010,13 +1010,13 @@ def test_loc_getitem_label_list_integer_labels( def test_loc_setitem_float_intindex(): # GH 8720 rand_data = np.random.randn(8, 4) - result = pd.DataFrame(rand_data) + result = DataFrame(rand_data) result.loc[:, 0.5] = np.nan expected_data = np.hstack((rand_data, np.array([np.nan] * 8).reshape(8, 1))) - expected = pd.DataFrame(expected_data, columns=[0.0, 1.0, 2.0, 3.0, 0.5]) + expected = DataFrame(expected_data, columns=[0.0, 1.0, 2.0, 3.0, 0.5]) tm.assert_frame_equal(result, expected) - result = pd.DataFrame(rand_data) + result = DataFrame(rand_data) result.loc[:, 0.5] = np.nan tm.assert_frame_equal(result, expected) @@ -1024,13 +1024,13 @@ def test_loc_setitem_float_intindex(): def test_loc_axis_1_slice(): # GH 10586 cols = [(yr, m) for yr in [2014, 2015] for m in [7, 8, 9, 10]] - df = pd.DataFrame( + df = DataFrame( np.ones((10, 8)), index=tuple("ABCDEFGHIJ"), columns=pd.MultiIndex.from_tuples(cols), ) result = df.loc(axis=1)[(2014, 9):(2015, 8)] - expected = pd.DataFrame( + 
expected = DataFrame( np.ones((10, 4)), index=tuple("ABCDEFGHIJ"), columns=pd.MultiIndex.from_tuples( @@ -1042,7 +1042,7 @@ def test_loc_axis_1_slice(): def test_loc_set_dataframe_multiindex(): # GH 14592 - expected = pd.DataFrame( + expected = DataFrame( "a", index=range(2), columns=pd.MultiIndex.from_product([range(2), range(2)]) ) result = expected.copy() @@ -1072,7 +1072,7 @@ def test_loc_with_positional_slice_deprecation(): def test_loc_slice_disallows_positional(): # GH#16121, GH#24612, GH#31810 dti = pd.date_range("2016-01-01", periods=3) - df = pd.DataFrame(np.random.random((3, 2)), index=dti) + df = DataFrame(np.random.random((3, 2)), index=dti) ser = df[0] @@ -1100,7 +1100,7 @@ def test_loc_slice_disallows_positional(): def test_loc_datetimelike_mismatched_dtypes(): # GH#32650 dont mix and match datetime/timedelta/period dtypes - df = pd.DataFrame( + df = DataFrame( np.random.randn(5, 3), columns=["a", "b", "c"], index=pd.date_range("2012", freq="H", periods=5), @@ -1122,7 +1122,7 @@ def test_loc_datetimelike_mismatched_dtypes(): def test_loc_with_period_index_indexer(): # GH#4125 idx = pd.period_range("2002-01", "2003-12", freq="M") - df = pd.DataFrame(np.random.randn(24, 10), index=idx) + df = DataFrame(np.random.randn(24, 10), index=idx) tm.assert_frame_equal(df, df.loc[idx]) tm.assert_frame_equal(df, df.loc[list(idx)]) tm.assert_frame_equal(df, df.loc[list(idx)]) diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 6005f7800178c..45c2725c26526 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -663,21 +663,21 @@ def test_indexing_timeseries_regression(self): def test_index_name_empty(self): # GH 31368 - df = pd.DataFrame({}, index=pd.RangeIndex(0, name="df_index")) + df = DataFrame({}, index=pd.RangeIndex(0, name="df_index")) series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) df["series"] = series - expected = pd.DataFrame( + expected = DataFrame( {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") ) tm.assert_frame_equal(df, expected) # GH 36527 - df = pd.DataFrame() + df = DataFrame() series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) df["series"] = series - expected = pd.DataFrame( + expected = DataFrame( {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index") ) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 9a0bfa5c605d9..90f3a392878d9 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1140,7 +1140,7 @@ def test_binop_other(self, op, value, dtype): pytest.skip(f"Invalid combination {op},{dtype}") e = DummyElement(value, dtype) - s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype) + s = DataFrame({"A": [e.value, e.value]}, dtype=e.dtype) invalid = { (operator.pow, "<M8[ns]"), @@ -1176,7 +1176,7 @@ def test_binop_other(self, op, value, dtype): class TestShouldStore: def test_should_store_categorical(self): cat = pd.Categorical(["A", "B", "C"]) - df = pd.DataFrame(cat) + df = DataFrame(cat) blk = df._mgr.blocks[0] # matching dtype @@ -1244,8 +1244,8 @@ def test_make_block_no_pandas_array(): def test_dataframe_not_equal(): # see GH28839 - df1 = pd.DataFrame({"a": [1, 2], "b": ["s", "d"]}) - df2 = pd.DataFrame({"a": ["s", "d"], "b": [1, 2]}) + df1 = DataFrame({"a": [1, 2], "b": ["s", "d"]}) + df2 = DataFrame({"a": ["s", "d"], "b": [1, 2]}) assert df1.equals(df2) is False diff --git 
a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 7c507b0e371e8..9f3299bcb5e38 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -567,7 +567,7 @@ def test_date_conversion_overflow(self, read_ext): if pd.read_excel.keywords["engine"] == "pyxlsb": pytest.xfail("Sheets containing datetimes not supported by pyxlsb") - expected = pd.DataFrame( + expected = DataFrame( [ [pd.Timestamp("2016-03-12"), "Marc Johnson"], [pd.Timestamp("2016-03-16"), "Jack Black"], @@ -843,7 +843,7 @@ def test_excel_old_index_format(self, read_ext): ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None ) - expected = pd.DataFrame(data, index=si, columns=columns) + expected = DataFrame(data, index=si, columns=columns) actual = pd.read_excel(filename, sheet_name="single_names", index_col=0) tm.assert_frame_equal(actual, expected) @@ -875,7 +875,7 @@ def test_excel_old_index_format(self, read_ext): ) si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None) - expected = pd.DataFrame(data, index=si, columns=columns) + expected = DataFrame(data, index=si, columns=columns) actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0) tm.assert_frame_equal(actual, expected) @@ -975,7 +975,7 @@ def test_read_excel_squeeze(self, read_ext): tm.assert_series_equal(actual, expected) actual = pd.read_excel(f, sheet_name="two_columns", squeeze=True) - expected = pd.DataFrame({"a": [4, 5, 6], "b": [2, 3, 4]}) + expected = DataFrame({"a": [4, 5, 6], "b": [2, 3, 4]}) tm.assert_frame_equal(actual, expected) actual = pd.read_excel(f, sheet_name="one_column", squeeze=True) @@ -995,7 +995,7 @@ def test_no_header_with_list_index_col(self, read_ext): idx = pd.MultiIndex.from_tuples( [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1) ) - expected = pd.DataFrame(data, index=idx, columns=(2, 3)) + expected = DataFrame(data, index=idx, columns=(2, 3)) result = pd.read_excel( file_name, sheet_name="index_col_none", index_col=[0, 1], header=None ) @@ -1158,7 +1158,7 @@ def test_excel_read_binary(self, engine, read_ext): def test_excel_high_surrogate(self, engine): # GH 23809 - expected = pd.DataFrame(["\udc88"], columns=["Column1"]) + expected = DataFrame(["\udc88"], columns=["Column1"]) # should not produce a segmentation violation actual = pd.read_excel("high_surrogate.xlsx") @@ -1171,7 +1171,7 @@ def test_header_with_index_col(self, engine, filename): cols = pd.MultiIndex.from_tuples( [("A", "B"), ("A", "B.1")], names=["I11", "I12"] ) - expected = pd.DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64") + expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64") result = pd.read_excel( filename, sheet_name="Sheet1", index_col=0, header=[0, 1] ) @@ -1192,6 +1192,6 @@ def test_read_datetime_multiindex(self, engine, read_ext): pd.to_datetime("03/01/2020").to_pydatetime(), ], ) - expected = pd.DataFrame([], columns=expected_column_index) + expected = DataFrame([], columns=expected_column_index) tm.assert_frame_equal(expected, actual) diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 0e27b87da9f3e..0a297286aa208 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -68,7 +68,7 @@ class TestRoundTrip: def test_read_one_empty_col_no_header(self, ext, header, expected): # xref gh-12292 filename = "no_header" - df = pd.DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + df = 
DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) with tm.ensure_clean(ext) as path: df.to_excel(path, filename, index=False, header=False) @@ -84,7 +84,7 @@ def test_read_one_empty_col_no_header(self, ext, header, expected): ) def test_read_one_empty_col_with_header(self, ext, header, expected): filename = "with_header" - df = pd.DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) with tm.ensure_clean(ext) as path: df.to_excel(path, "with_header", index=False, header=True) @@ -97,7 +97,7 @@ def test_read_one_empty_col_with_header(self, ext, header, expected): def test_set_column_names_in_parameter(self, ext): # GH 12870 : pass down column names associated with # keyword argument names - refdf = pd.DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"]) + refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"]) with tm.ensure_clean(ext) as pth: with ExcelWriter(pth) as writer: @@ -169,7 +169,7 @@ def test_read_excel_multiindex_empty_level(self, ext): actual = pd.read_excel(path, header=[0, 1], index_col=0) tm.assert_frame_equal(actual, expected) - df = pd.DataFrame( + df = DataFrame( { ("Beg", ""): {0: 0}, ("Middle", "x"): {0: 1}, @@ -178,7 +178,7 @@ def test_read_excel_multiindex_empty_level(self, ext): } ) - expected = pd.DataFrame( + expected = DataFrame( { ("Beg", "Unnamed: 1_level_1"): {0: 0}, ("Middle", "x"): {0: 1}, @@ -273,11 +273,11 @@ def test_multiindex_interval_datetimes(self, ext): ), ] ) - df = pd.DataFrame(range(4), index=midx) + df = DataFrame(range(4), index=midx) with tm.ensure_clean(ext) as pth: df.to_excel(pth) result = pd.read_excel(pth, index_col=[0, 1]) - expected = pd.DataFrame( + expected = DataFrame( range(4), pd.MultiIndex.from_arrays( [ @@ -328,8 +328,8 @@ def test_excel_sheet_size(self, path): # purposely using two arrays to prevent memory issues while testing row_arr = np.zeros(shape=(breaking_row_count, 1)) col_arr = np.zeros(shape=(1, breaking_col_count)) - row_df = pd.DataFrame(row_arr) - col_df = pd.DataFrame(col_arr) + row_df = DataFrame(row_arr) + col_df = DataFrame(col_arr) msg = "sheet is too large" with pytest.raises(ValueError, match=msg): @@ -759,9 +759,7 @@ def test_to_excel_multiindex(self, merge_cells, frame, path): # GH13511 def test_to_excel_multiindex_nan_label(self, merge_cells, path): - df = pd.DataFrame( - {"A": [None, 2, 3], "B": [10, 20, 30], "C": np.random.sample(3)} - ) + df = DataFrame({"A": [None, 2, 3], "B": [10, 20, 30], "C": np.random.sample(3)}) df = df.set_index(["A", "B"]) df.to_excel(path, merge_cells=merge_cells) @@ -1241,7 +1239,7 @@ def test_render_as_column_name(self, path): def test_true_and_false_value_options(self, path): # see gh-13347 - df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"]) + df = DataFrame([["foo", "bar"]], columns=["col1", "col2"]) expected = df.replace({"foo": True, "bar": False}) df.to_excel(path) diff --git a/pandas/tests/io/excel/test_xlwt.py b/pandas/tests/io/excel/test_xlwt.py index a2d8b9fce9767..6bfd71beed0ca 100644 --- a/pandas/tests/io/excel/test_xlwt.py +++ b/pandas/tests/io/excel/test_xlwt.py @@ -1,7 +1,6 @@ import numpy as np import pytest -import pandas as pd from pandas import DataFrame, MultiIndex import pandas._testing as tm @@ -32,7 +31,7 @@ def test_excel_multiindex_columns_and_index_true(ext): cols = MultiIndex.from_tuples( [("site", ""), ("2014", "height"), ("2014", "weight")] ) - df = pd.DataFrame(np.random.randn(10, 3), columns=cols) 
+    df = DataFrame(np.random.randn(10, 3), columns=cols)
 
     with tm.ensure_clean(ext) as path:
         df.to_excel(path, index=True)
diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py
index 6801316ada8a3..b8e3122cac5c4 100644
--- a/pandas/tests/io/formats/test_eng_formatting.py
+++ b/pandas/tests/io/formats/test_eng_formatting.py
@@ -1,6 +1,5 @@
 import numpy as np
 
-import pandas as pd
 from pandas import DataFrame
 import pandas._testing as tm
 
@@ -214,7 +213,7 @@ def test_nan(self):
         result = formatter(np.nan)
         assert result == "NaN"
 
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "a": [1.5, 10.3, 20.5],
                 "b": [50.3, 60.67, 70.12],
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 239bb54f48c16..c57139e50561d 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -249,7 +249,7 @@ def test_repr_chop_threshold(self):
 
     def test_repr_chop_threshold_column_below(self):
         # GH 6839: validation case
-        df = pd.DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
+        df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
 
         with option_context("display.chop_threshold", 0):
             assert repr(df) == (
@@ -370,7 +370,7 @@ def test_repr_truncates_terminal_size(self, monkeypatch):
                 ("This is a loooooonger title with > 43 chars.", "dog"),
             ]
         )
-        df = pd.DataFrame(1, index=index, columns=columns)
+        df = DataFrame(1, index=index, columns=columns)
 
         result = repr(df)
 
@@ -381,7 +381,7 @@ def test_repr_truncates_terminal_size(self, monkeypatch):
         assert "dog" in h2
 
         # regular columns
-        df2 = pd.DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
+        df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
         result = repr(df2)
 
         assert df2.columns[0] in result.split("\n")[0]
@@ -389,7 +389,7 @@ def test_repr_truncates_terminal_size_full(self, monkeypatch):
     def test_repr_truncates_terminal_size_full(self, monkeypatch):
         # GH 22984 ensure entire window is filled
         terminal_size = (80, 24)
-        df = pd.DataFrame(np.random.rand(1, 7))
+        df = DataFrame(np.random.rand(1, 7))
 
         monkeypatch.setattr(
             "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
@@ -399,7 +399,7 @@ def test_repr_truncates_terminal_size_full(self, monkeypatch):
     def test_repr_truncation_column_size(self):
         # dataframe with last column very wide -> check it is not used to
         # determine size of truncation (...) column
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "a": [108480, 30830],
                 "b": [12345, 12345],
@@ -457,13 +457,13 @@ def mkframe(n):
         assert has_expanded_repr(df)
 
     def test_repr_min_rows(self):
-        df = pd.DataFrame({"a": range(20)})
+        df = DataFrame({"a": range(20)})
 
         # default setting no truncation even if above min_rows
         assert ".." not in repr(df)
         assert ".." not in df._repr_html_()
 
-        df = pd.DataFrame({"a": range(61)})
+        df = DataFrame({"a": range(61)})
 
         # default of max_rows 60 triggers truncation if above
         assert ".." in repr(df)
@@ -493,7 +493,7 @@ def test_repr_min_rows(self):
 
     def test_str_max_colwidth(self):
         # GH 7856
-        df = pd.DataFrame(
+        df = DataFrame(
             [
                 {
                     "a": "foo",
@@ -689,7 +689,7 @@ def test_east_asian_unicode_false(self):
 
         # truncate
         with option_context("display.max_rows", 3, "display.max_columns", 3):
-            df = pd.DataFrame(
+            df = DataFrame(
                 {
                     "a": ["あああああ", "い", "う", "えええ"],
                     "b": ["あ", "いいい", "う", "ええええええ"],
@@ -834,7 +834,7 @@ def test_east_asian_unicode_true(self):
 
         # truncate
        with option_context("display.max_rows", 3, "display.max_columns", 3):
-            df = pd.DataFrame(
+            df = DataFrame(
                 {
                     "a": ["あああああ", "い", "う", "えええ"],
                     "b": ["あ", "いいい", "う", "ええええええ"],
@@ -1020,7 +1020,7 @@ def test_datetimelike_frame(self):
         assert "[6 rows x 1 columns]" in result
 
         dts = [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [pd.NaT] * 5
-        df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+        df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
         with option_context("display.max_rows", 5):
             expected = (
                 " dt x\n"
@@ -1034,7 +1034,7 @@ def test_datetimelike_frame(self):
             assert repr(df) == expected
 
         dts = [pd.NaT] * 5 + [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5
-        df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+        df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
         with option_context("display.max_rows", 5):
             expected = (
                 " dt x\n"
@@ -1050,7 +1050,7 @@ def test_datetimelike_frame(self):
         dts = [pd.Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
             pd.Timestamp("2011-01-01", tz="US/Eastern")
         ] * 5
-        df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+        df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
         with option_context("display.max_rows", 5):
             expected = (
                 " dt x\n"
@@ -2001,14 +2001,14 @@ def test_categorical_columns(self):
         # GH35439
         data = [[4, 2], [3, 2], [4, 3]]
         cols = ["aaaaaaaaa", "b"]
-        df = pd.DataFrame(data, columns=cols)
-        df_cat_cols = pd.DataFrame(data, columns=pd.CategoricalIndex(cols))
+        df = DataFrame(data, columns=cols)
+        df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
 
         assert df.to_string() == df_cat_cols.to_string()
 
     def test_period(self):
         # GH 12615
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "A": pd.period_range("2013-01", periods=4, freq="M"),
                 "B": [
@@ -2694,9 +2694,7 @@ def test_to_string_header(self):
 
     def test_to_string_multindex_header(self):
         # GH 16718
-        df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(
-            ["a", "b"]
-        )
+        df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
         res = df.to_string(header=["r1", "r2"])
         exp = " r1 r2\na b \n0 1 2 3"
         assert res == exp
@@ -2797,7 +2795,7 @@ def test_output_significant_digits(self):
         # In case default display precision changes:
         with pd.option_context("display.precision", 6):
             # DataFrame example from issue #9764
-            d = pd.DataFrame(
+            d = DataFrame(
                 {
                     "col1": [
                         9.999e-8,
@@ -2869,11 +2867,11 @@ def test_too_long(self):
         with pd.option_context("display.precision", 4):
             # need both a number > 1e6 and something that normally formats to
             # having length > display.precision + 6
-            df = pd.DataFrame(dict(x=[12345.6789]))
+            df = DataFrame(dict(x=[12345.6789]))
             assert str(df) == " x\n0 12345.6789"
-            df = pd.DataFrame(dict(x=[2e6]))
+            df = DataFrame(dict(x=[2e6]))
             assert str(df) == " x\n0 2000000.0"
-            df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
+            df = DataFrame(dict(x=[12345.6789, 2e6]))
             assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
@@ -3205,8 +3203,8 @@ def test_format_percentiles_integer_idx():
 def test_repr_html_ipython_config(ip):
     code = textwrap.dedent(
         """\
-    import pandas as pd
-    df = pd.DataFrame({"A": [1, 2]})
+    from pandas import DataFrame
+    df = DataFrame({"A": [1, 2]})
     df._repr_html_()
 
     cfg = get_ipython().config
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 476d75f7d239d..79f9bbace000e 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -28,10 +28,10 @@ def h(x, foo="bar"):
         self.h = h
         self.styler = Styler(self.df)
-        self.attrs = pd.DataFrame({"A": ["color: red", "color: blue"]})
+        self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
         self.dataframes = [
             self.df,
-            pd.DataFrame(
+            DataFrame(
                 {"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
             ),
         ]
@@ -110,7 +110,7 @@ def test_clear(self):
         assert len(s._todo) == 0
 
     def test_render(self):
-        df = pd.DataFrame({"A": [0, 1]})
+        df = DataFrame({"A": [0, 1]})
         style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
         s = Styler(df, uuid="AB").apply(style)
         s.render()
@@ -127,7 +127,7 @@ def test_render_empty_dfs(self):
         # No IndexError raised?
 
     def test_render_double(self):
-        df = pd.DataFrame({"A": [0, 1]})
+        df = DataFrame({"A": [0, 1]})
         style = lambda x: pd.Series(
             ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
         )
@@ -136,7 +136,7 @@ def test_render_double(self):
         # it worked?
 
     def test_set_properties(self):
-        df = pd.DataFrame({"A": [0, 1]})
+        df = DataFrame({"A": [0, 1]})
         result = df.style.set_properties(color="white", size="10px")._compute().ctx
         # order is deterministic
         v = ["color: white", "size: 10px"]
@@ -146,7 +146,7 @@ def test_set_properties(self):
         assert sorted(v1) == sorted(v2)
 
     def test_set_properties_subset(self):
-        df = pd.DataFrame({"A": [0, 1]})
+        df = DataFrame({"A": [0, 1]})
         result = (
             df.style.set_properties(subset=pd.IndexSlice[0, "A"], color="white")
             ._compute()
@@ -157,7 +157,7 @@ def test_empty_index_name_doesnt_display(self):
 
     def test_empty_index_name_doesnt_display(self):
         # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
-        df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
         result = df.style._translate()
 
         expected = [
@@ -197,7 +197,7 @@ def test_empty_index_name_doesnt_display(self):
 
     def test_index_name(self):
         # https://github.com/pandas-dev/pandas/issues/11655
-        df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
         result = df.set_index("A").style._translate()
 
         expected = [
@@ -235,7 +235,7 @@ def test_index_name(self):
 
     def test_multiindex_name(self):
         # https://github.com/pandas-dev/pandas/issues/11655
-        df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
         result = df.set_index(["A", "B"]).style._translate()
 
         expected = [
@@ -274,11 +274,11 @@ def test_multiindex_name(self):
     def test_numeric_columns(self):
         # https://github.com/pandas-dev/pandas/issues/12125
         # smoke test for _translate
-        df = pd.DataFrame({0: [1, 2, 3]})
+        df = DataFrame({0: [1, 2, 3]})
         df.style._translate()
 
     def test_apply_axis(self):
-        df = pd.DataFrame({"A": [0, 0], "B": [1, 1]})
+        df = DataFrame({"A": [0, 0], "B": [1, 1]})
         f = lambda x: [f"val: {x.max()}" for v in x]
         result = df.style.apply(f, axis=1)
         assert len(result._todo) == 1
@@ -373,7 +373,7 @@ def color_negative_red(val):
         }
 
         idx = pd.IndexSlice
-        df = pd.DataFrame(dic, index=[0, 1])
+        df = DataFrame(dic, index=[0, 1])

         (df.style.applymap(color_negative_red, subset=idx[:, idx["b", "d"]]).render())
@@ -468,7 +468,7 @@ def g(x):
         assert result == expected

     def test_empty(self):
-        df = pd.DataFrame({"A": [1, 0]})
+        df = DataFrame({"A": [1, 0]})
         s = df.style
         s.ctx = {(0, 0): ["color: red"], (1, 0): [""]}
@@ -480,7 +480,7 @@ def test_empty(self):
         assert result == expected

     def test_duplicate(self):
-        df = pd.DataFrame({"A": [1, 0]})
+        df = DataFrame({"A": [1, 0]})
         s = df.style
         s.ctx = {(0, 0): ["color: red"], (1, 0): ["color: red"]}
@@ -491,7 +491,7 @@ def test_duplicate(self):
         assert result == expected

     def test_bar_align_left(self):
-        df = pd.DataFrame({"A": [0, 1, 2]})
+        df = DataFrame({"A": [0, 1, 2]})
         result = df.style.bar()._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -534,7 +534,7 @@ def test_bar_align_left(self):
         assert result == expected

     def test_bar_align_left_0points(self):
-        df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+        df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
         result = df.style.bar()._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -620,7 +620,7 @@ def test_bar_align_left_0points(self):
         assert result == expected

     def test_bar_align_mid_pos_and_neg(self):
-        df = pd.DataFrame({"A": [-10, 0, 20, 90]})
+        df = DataFrame({"A": [-10, 0, 20, 90]})

         result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx

@@ -652,7 +652,7 @@ def test_bar_align_mid_pos_and_neg(self):
         assert result == expected

     def test_bar_align_mid_all_pos(self):
-        df = pd.DataFrame({"A": [10, 20, 50, 100]})
+        df = DataFrame({"A": [10, 20, 50, 100]})

         result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx

@@ -686,7 +686,7 @@ def test_bar_align_mid_all_pos(self):
         assert result == expected

     def test_bar_align_mid_all_neg(self):
-        df = pd.DataFrame({"A": [-100, -60, -30, -20]})
+        df = DataFrame({"A": [-100, -60, -30, -20]})

         result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx

@@ -726,7 +726,7 @@ def test_bar_align_mid_all_neg(self):

     def test_bar_align_zero_pos_and_neg(self):
         # See https://github.com/pandas-dev/pandas/pull/14757
-        df = pd.DataFrame({"A": [-10, 0, 20, 90]})
+        df = DataFrame({"A": [-10, 0, 20, 90]})

         result = (
             df.style.bar(align="zero", color=["#d65f5f", "#5fba7d"], width=90)
@@ -760,7 +760,7 @@ def test_bar_align_zero_pos_and_neg(self):
         assert result == expected

     def test_bar_align_left_axis_none(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [2, 4]})
         result = df.style.bar(axis=None)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -786,7 +786,7 @@ def test_bar_align_left_axis_none(self):
         assert result == expected

     def test_bar_align_zero_axis_none(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [-2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [-2, 4]})
         result = df.style.bar(align="zero", axis=None)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -815,7 +815,7 @@ def test_bar_align_zero_axis_none(self):
         assert result == expected

     def test_bar_align_mid_axis_none(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [-2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [-2, 4]})
         result = df.style.bar(align="mid", axis=None)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -843,7 +843,7 @@ def test_bar_align_mid_axis_none(self):
         assert result == expected

     def test_bar_align_mid_vmin(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [-2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [-2, 4]})
         result = df.style.bar(align="mid", axis=None, vmin=-6)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -872,7 +872,7 @@ def test_bar_align_mid_vmin(self):
         assert result == expected

     def test_bar_align_mid_vmax(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [-2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [-2, 4]})
         result = df.style.bar(align="mid", axis=None, vmax=8)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -900,7 +900,7 @@ def test_bar_align_mid_vmax(self):
         assert result == expected

     def test_bar_align_mid_vmin_vmax_wide(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [-2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [-2, 4]})
         result = df.style.bar(align="mid", axis=None, vmin=-3, vmax=7)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -929,7 +929,7 @@ def test_bar_align_mid_vmin_vmax_wide(self):
         assert result == expected

     def test_bar_align_mid_vmin_vmax_clipping(self):
-        df = pd.DataFrame({"A": [0, 1], "B": [-2, 4]})
+        df = DataFrame({"A": [0, 1], "B": [-2, 4]})
         result = df.style.bar(align="mid", axis=None, vmin=-1, vmax=3)._compute().ctx
         expected = {
             (0, 0): ["width: 10em", " height: 80%"],
@@ -957,7 +957,7 @@ def test_bar_align_mid_vmin_vmax_clipping(self):
         assert result == expected

     def test_bar_align_mid_nans(self):
-        df = pd.DataFrame({"A": [1, None], "B": [-1, 3]})
+        df = DataFrame({"A": [1, None], "B": [-1, 3]})
         result = df.style.bar(align="mid", axis=None)._compute().ctx
         expected = {
             (0, 0): [
@@ -984,7 +984,7 @@ def test_bar_align_mid_nans(self):
         assert result == expected

     def test_bar_align_zero_nans(self):
-        df = pd.DataFrame({"A": [1, None], "B": [-1, 2]})
+        df = DataFrame({"A": [1, None], "B": [-1, 2]})
         result = df.style.bar(align="zero", axis=None)._compute().ctx
         expected = {
             (0, 0): [
@@ -1012,14 +1012,14 @@ def test_bar_align_zero_nans(self):
         assert result == expected

     def test_bar_bad_align_raises(self):
-        df = pd.DataFrame({"A": [-100, -60, -30, -20]})
+        df = DataFrame({"A": [-100, -60, -30, -20]})
         msg = "`align` must be one of {'left', 'zero',' mid'}"
         with pytest.raises(ValueError, match=msg):
             df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"])

     def test_format_with_na_rep(self):
         # GH 21527 28358
-        df = pd.DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+        df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
         ctx = df.style.format(None, na_rep="-")._translate()
         assert ctx["body"][0][1]["display_value"] == "-"
@@ -1037,7 +1037,7 @@ def test_format_with_na_rep(self):

     def test_init_with_na_rep(self):
         # GH 21527 28358
-        df = pd.DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+        df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])

         ctx = Styler(df, na_rep="NA")._translate()
         assert ctx["body"][0][1]["display_value"] == "NA"
@@ -1045,7 +1045,7 @@ def test_init_with_na_rep(self):

     def test_set_na_rep(self):
         # GH 21527 28358
-        df = pd.DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+        df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])

         ctx = df.style.set_na_rep("NA")._translate()
         assert ctx["body"][0][1]["display_value"] == "NA"
@@ -1061,7 +1061,7 @@ def test_set_na_rep(self):

     def test_format_non_numeric_na(self):
         # GH 21527 28358
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "object": [None, np.nan, "foo"],
                 "datetime": [None, pd.NaT, pd.Timestamp("20120101")],
@@ -1082,20 +1082,20 @@ def test_format_non_numeric_na(self):

     def test_format_with_bad_na_rep(self):
         # GH 21527 28358
-        df = pd.DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+        df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
         msg = "Expected a string, got -1 instead"
         with pytest.raises(TypeError, match=msg):
             df.style.format(None, na_rep=-1)

     def test_highlight_null(self, null_color="red"):
-        df = pd.DataFrame({"A": [0, np.nan]})
+        df = DataFrame({"A": [0, np.nan]})
         result = df.style.highlight_null()._compute().ctx
         expected = {(1, 0): ["background-color: red"]}
         assert result == expected

     def test_highlight_null_subset(self):
         # GH 31345
-        df = pd.DataFrame({"A": [0, np.nan], "B": [0, np.nan]})
+        df = DataFrame({"A": [0, np.nan], "B": [0, np.nan]})
         result = (
             df.style.highlight_null(null_color="red", subset=["A"])
             .highlight_null(null_color="green", subset=["B"])
@@ -1109,7 +1109,7 @@ def test_highlight_null_subset(self):
         assert result == expected

     def test_nonunique_raises(self):
-        df = pd.DataFrame([[1, 2]], columns=["A", "A"])
+        df = DataFrame([[1, 2]], columns=["A", "A"])
         msg = "style is not supported for non-unique indices."
         with pytest.raises(ValueError, match=msg):
             df.style
@@ -1139,7 +1139,7 @@ def test_uuid(self):

     def test_unique_id(self):
         # See https://github.com/pandas-dev/pandas/issues/16780
-        df = pd.DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
+        df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
         result = df.style.render(uuid="test")
         assert "test" in result
         ids = re.findall('id="(.*?)"', result)
@@ -1178,13 +1178,13 @@ def test_precision(self):

     def test_apply_none(self):
         def f(x):
-            return pd.DataFrame(
+            return DataFrame(
                 np.where(x == x.max(), "color: red", ""),
                 index=x.index,
                 columns=x.columns,
             )

-        result = pd.DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
+        result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
         assert result[(1, 1)] == ["color: red"]

     def test_trim(self):
@@ -1195,7 +1195,7 @@ def test_trim(self):
         assert result.count("#") == len(self.df.columns)

     def test_highlight_max(self):
-        df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+        df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
         # max(df) = min(-df)
         for max_ in [True, False]:
             if max_:
@@ -1246,7 +1246,7 @@ def test_export(self):
         style2.render()

     def test_display_format(self):
-        df = pd.DataFrame(np.random.random(size=(2, 2)))
+        df = DataFrame(np.random.random(size=(2, 2)))
         ctx = df.style.format("{:0.1f}")._translate()

         assert all(["display_value" in c for c in row] for row in ctx["body"])
@@ -1256,7 +1256,7 @@ def test_display_format(self):
         assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3

     def test_display_format_raises(self):
-        df = pd.DataFrame(np.random.randn(2, 2))
+        df = DataFrame(np.random.randn(2, 2))
         msg = "Expected a template string or callable, got 5 instead"
         with pytest.raises(TypeError, match=msg):
             df.style.format(5)
@@ -1267,7 +1267,7 @@ def test_display_format_raises(self):

     def test_display_set_precision(self):
         # Issue #13257
-        df = pd.DataFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"])
+        df = DataFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"])
         s = Styler(df)

         ctx = s.set_precision(1)._translate()
@@ -1293,7 +1293,7 @@ def test_display_set_precision(self):
         assert ctx["body"][1][2]["display_value"] == "4.566"

     def test_display_subset(self):
-        df = pd.DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
+        df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
         ctx = df.style.format(
             {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=pd.IndexSlice[0, :]
         )._translate()
@@ -1324,7 +1324,7 @@ def test_display_subset(self):
         assert ctx["body"][1][2]["display_value"] == raw_11

     def test_display_dict(self):
-        df = pd.DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
+        df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
         ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
         assert ctx["body"][0][1]["display_value"] == "0.1"
         assert ctx["body"][0][2]["display_value"] == "12.34%"
@@ -1334,7 +1334,7 @@ def test_display_dict(self):
         assert ctx["body"][0][3]["display_value"] == "AAA"

     def test_bad_apply_shape(self):
-        df = pd.DataFrame([[1, 2], [3, 4]])
+        df = DataFrame([[1, 2], [3, 4]])
         msg = "returned the wrong shape"
         with pytest.raises(ValueError, match=msg):
             df.style._apply(lambda x: "x", subset=pd.IndexSlice[[0, 1], :])
@@ -1356,16 +1356,16 @@ def test_apply_bad_return(self):
         def f(x):
             return ""

-        df = pd.DataFrame([[1, 2], [3, 4]])
+        df = DataFrame([[1, 2], [3, 4]])
         msg = "must return a DataFrame when passed to `Styler.apply` with axis=None"
         with pytest.raises(TypeError, match=msg):
             df.style._apply(f, axis=None)

     def test_apply_bad_labels(self):
         def f(x):
-            return pd.DataFrame(index=[1, 2], columns=["a", "b"])
+            return DataFrame(index=[1, 2], columns=["a", "b"])

-        df = pd.DataFrame([[1, 2], [3, 4]])
+        df = DataFrame([[1, 2], [3, 4]])
         msg = "must have identical index and columns as the input"
         with pytest.raises(ValueError, match=msg):
             df.style._apply(f, axis=None)
@@ -1400,7 +1400,7 @@ def test_get_level_lengths_un_sorted(self):
         tm.assert_dict_equal(result, expected)

     def test_mi_sparse(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             {"A": [1, 2]}, index=pd.MultiIndex.from_arrays([["a", "a"], [0, 1]])
         )

@@ -1467,7 +1467,7 @@ def test_mi_sparse(self):

     def test_mi_sparse_disabled(self):
         with pd.option_context("display.multi_sparse", False):
-            df = pd.DataFrame(
+            df = DataFrame(
                 {"A": [1, 2]}, index=pd.MultiIndex.from_arrays([["a", "a"], [0, 1]])
             )
             result = df.style._translate()
@@ -1476,7 +1476,7 @@ def test_mi_sparse_disabled(self):
             assert "attributes" not in row[0]

     def test_mi_sparse_index_names(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             {"A": [1, 2]},
             index=pd.MultiIndex.from_arrays(
                 [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
@@ -1493,7 +1493,7 @@ def test_mi_sparse_index_names(self):
         assert head == expected

     def test_mi_sparse_column_names(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             np.arange(16).reshape(4, 4),
             index=pd.MultiIndex.from_arrays(
                 [["a", "a", "b", "a"], [0, 1, 1, 2]],
@@ -1574,7 +1574,7 @@ def test_hide_single_index(self):

     def test_hide_multiindex(self):
         # GH 14194
-        df = pd.DataFrame(
+        df = DataFrame(
             {"A": [1, 2]},
             index=pd.MultiIndex.from_arrays(
                 [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
@@ -1628,7 +1628,7 @@ def test_hide_columns_mult_levels(self):
         i2 = pd.MultiIndex.from_arrays(
             [["b", "b"], [0, 1]], names=["col_level_0", "col_level_1"]
         )
-        df = pd.DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
+        df = DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
         ctx = df.style._translate()
         # column headers
         assert ctx["head"][0][2]["is_visible"]
@@ -1685,7 +1685,7 @@ def f(a, b, styler):

     def test_no_cell_ids(self):
         # GH 35588
         # GH 35663
-        df = pd.DataFrame(data=[[0]])
+        df = DataFrame(data=[[0]])
         styler = Styler(df, uuid="_", cell_ids=False)
         styler.render()
         s = styler.render()  # render twice to ensure ctx is not updated
@@ -1714,14 +1714,14 @@ def test_set_data_classes(self, classes):

     def test_colspan_w3(self):
         # GH 36223
-        df = pd.DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
+        df = DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
         s = Styler(df, uuid="_", cell_ids=False)
         assert '<th class="col_heading level0 col0" colspan="2">l0</th>' in s.render()

     @pytest.mark.parametrize("len_", [1, 5, 32, 33, 100])
     def test_uuid_len(self, len_):
         # GH 36345
-        df = pd.DataFrame(data=[["A"]])
+        df = DataFrame(data=[["A"]])
         s = Styler(df, uuid_len=len_, cell_ids=False).render()
         strt = s.find('id="T_')
         end = s[strt + 6 :].find('"')
@@ -1733,7 +1733,7 @@ def test_uuid_len(self, len_):
     @pytest.mark.parametrize("len_", [-2, "bad", None])
     def test_uuid_len_raises(self, len_):
         # GH 36345
-        df = pd.DataFrame(data=[["A"]])
+        df = DataFrame(data=[["A"]])
         msg = "``uuid_len`` must be an integer in range \\[0, 32\\]."
         with pytest.raises(TypeError, match=msg):
             Styler(df, uuid_len=len_, cell_ids=False).render()
@@ -1742,7 +1742,7 @@ def test_uuid_len_raises(self, len_):

 @td.skip_if_no_mpl
 class TestStylerMatplotlibDep:
     def test_background_gradient(self):
-        df = pd.DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
+        df = DataFrame([[1, 2], [2, 4]], columns=["A", "B"])

         for c_map in [None, "YlOrRd"]:
             result = df.style.background_gradient(cmap=c_map)._compute().ctx
@@ -1776,13 +1776,13 @@ def test_background_gradient(self):
         ],
     )
     def test_text_color_threshold(self, c_map, expected):
-        df = pd.DataFrame([1, 2], columns=["A"])
+        df = DataFrame([1, 2], columns=["A"])
         result = df.style.background_gradient(cmap=c_map)._compute().ctx
         assert result == expected

     @pytest.mark.parametrize("text_color_threshold", [1.1, "1", -1, [2, 2]])
     def test_text_color_threshold_raises(self, text_color_threshold):
-        df = pd.DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
+        df = DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
         msg = "`text_color_threshold` must be a value from 0 to 1."
         with pytest.raises(ValueError, match=msg):
             df.style.background_gradient(
@@ -1791,7 +1791,7 @@ def test_text_color_threshold_raises(self, text_color_threshold):

     @td.skip_if_no_mpl
     def test_background_gradient_axis(self):
-        df = pd.DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
+        df = DataFrame([[1, 2], [2, 4]], columns=["A", "B"])

         low = ["background-color: #f7fbff", "color: #000000"]
         high = ["background-color: #08306b", "color: #f1f1f1"]
@@ -1816,7 +1816,7 @@ def test_background_gradient_axis(self):

     def test_background_gradient_vmin_vmax(self):
         # GH 12145
-        df = pd.DataFrame(range(5))
+        df = DataFrame(range(5))
         ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx
         assert ctx[(0, 0)] == ctx[(1, 0)]
         assert ctx[(4, 0)] == ctx[(3, 0)]
@@ -1873,5 +1873,5 @@ def test_from_custom_template(tmpdir):
     assert issubclass(result, Styler)
     assert result.env is not Styler.env
     assert result.template is not Styler.template
-    styler = result(pd.DataFrame({"A": [1, 2]}))
+    styler = result(DataFrame({"A": [1, 2]}))
     assert styler.render()
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index e2ceb95d77053..3584ec047d4d2 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -150,7 +150,7 @@ def test_to_csv_decimal(self):
         )

         # see gh-11553: testing if decimal is taken into account for '0.0'
-        df = pd.DataFrame({"a": [0, 1.1], "b": [2.2, 3.3], "c": 1})
+        df = DataFrame({"a": [0, 1.1], "b": [2.2, 3.3], "c": 1})

         expected_rows = ["a,b,c", "0^0,2^2,1", "1^1,3^3,1"]
         expected = tm.convert_rows_list_to_csv_str(expected_rows)
@@ -165,7 +165,7 @@ def test_to_csv_decimal(self):
     def test_to_csv_float_format(self):
         # testing if float_format is taken into account for the index
         # GH 11553
-        df = pd.DataFrame({"a": [0, 1], "b": [2.2, 3.3], "c": 1})
+        df = DataFrame({"a": [0, 1], "b": [2.2, 3.3], "c": 1})

         expected_rows = ["a,b,c", "0,2.20,1", "1,3.30,1"]
         expected = tm.convert_rows_list_to_csv_str(expected_rows)
@@ -334,7 +334,7 @@ def test_to_csv_single_level_multi_index(self, ind, expected, klass):
     def test_to_csv_string_array_ascii(self):
         # GH 10813
         str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
-        df = pd.DataFrame(str_array)
+        df = DataFrame(str_array)
         expected_ascii = """\
,names
0,"['foo', 'bar']"
@@ -348,7 +348,7 @@ def test_to_csv_string_array_ascii(self):
     def test_to_csv_string_array_utf8(self):
         # GH 10813
         str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
-        df = pd.DataFrame(str_array)
+        df = DataFrame(str_array)
         expected_utf8 = """\
,names
0,"['foo', 'bar']"
@@ -362,7 +362,7 @@ def test_to_csv_string_array_utf8(self):
     def test_to_csv_string_with_lf(self):
         # GH 20353
         data = {"int": [1, 2, 3], "str_lf": ["abc", "d\nef", "g\nh\n\ni"]}
-        df = pd.DataFrame(data)
+        df = DataFrame(data)
         with tm.ensure_clean("lf_test.csv") as path:
             # case 1: The default line terminator(=os.linesep)(PR 21406)
             os_linesep = os.linesep.encode("utf-8")
@@ -396,7 +396,7 @@ def test_to_csv_string_with_lf(self):
     def test_to_csv_string_with_crlf(self):
         # GH 20353
         data = {"int": [1, 2, 3], "str_crlf": ["abc", "d\r\nef", "g\r\nh\r\n\r\ni"]}
-        df = pd.DataFrame(data)
+        df = DataFrame(data)
         with tm.ensure_clean("crlf_test.csv") as path:
             # case 1: The default line terminator(=os.linesep)(PR 21406)
             os_linesep = os.linesep.encode("utf-8")
@@ -434,9 +434,7 @@ def test_to_csv_string_with_crlf(self):

     def test_to_csv_stdout_file(self, capsys):
         # GH 21561
-        df = pd.DataFrame(
-            [["foo", "bar"], ["baz", "qux"]], columns=["name_1", "name_2"]
-        )
+        df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["name_1", "name_2"])
         expected_rows = [",name_1,name_2", "0,foo,bar", "1,baz,qux"]
         expected_ascii = tm.convert_rows_list_to_csv_str(expected_rows)
@@ -456,7 +454,7 @@ def test_to_csv_stdout_file(self, capsys):
     )
     def test_to_csv_write_to_open_file(self):
         # GH 21696
-        df = pd.DataFrame({"a": ["x", "y", "z"]})
+        df = DataFrame({"a": ["x", "y", "z"]})
         expected = """\
manual header
x
@@ -473,7 +471,7 @@ def test_to_csv_write_to_open_file(self):
     def test_to_csv_write_to_open_file_with_newline_py3(self):
         # see gh-21696
         # see gh-20353
-        df = pd.DataFrame({"a": ["x", "y", "z"]})
+        df = DataFrame({"a": ["x", "y", "z"]})
         expected_rows = ["x", "y", "z"]
         expected = "manual header\n" + tm.convert_rows_list_to_csv_str(expected_rows)
         with tm.ensure_clean("test.txt") as path:
@@ -557,7 +555,7 @@ def test_to_csv_zip_arguments(self, compression, archive_name):
     @pytest.mark.parametrize("df_new_type", ["Int64"])
     def test_to_csv_na_rep_long_string(self, df_new_type):
         # see gh-25099
-        df = pd.DataFrame({"c": [float("nan")] * 3})
+        df = DataFrame({"c": [float("nan")] * 3})
         df = df.astype(df_new_type)
         expected_rows = ["c", "mynull", "mynull", "mynull"]
         expected = tm.convert_rows_list_to_csv_str(expected_rows)
@@ -635,7 +633,7 @@ def test_to_csv_encoding_binary_handle(self):
         # example from GH 13068
         with tm.ensure_clean() as path:
             with open(path, "w+b") as handle:
-                pd.DataFrame().to_csv(handle, mode="w+b", encoding="utf-8-sig")
+                DataFrame().to_csv(handle, mode="w+b", encoding="utf-8-sig")

                 handle.seek(0)
                 assert handle.read().startswith(b'\xef\xbb\xbf""')
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 7acdbfd462874..18cbd7186e931 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -787,13 +787,13 @@ def test_html_repr_min_rows_default(datapath):
     # gh-27991
     # default setting no truncation even if above min_rows
-    df = pd.DataFrame({"a": range(20)})
+    df = DataFrame({"a": range(20)})
     result = df._repr_html_()
     expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
     assert result == expected

     # default of max_rows 60 triggers truncation if above
-    df = pd.DataFrame({"a": range(61)})
+    df = DataFrame({"a": range(61)})
     result = df._repr_html_()
     expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
     assert result == expected
@@ -815,7 +815,7 @@ def test_html_repr_min_rows_default(datapath):
 def test_html_repr_min_rows(datapath, max_rows, min_rows, expected):
     # gh-27991
-    df = pd.DataFrame({"a": range(61)})
+    df = DataFrame({"a": range(61)})
     expected = expected_html(datapath, expected)
     with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
         result = df._repr_html_()
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index b2edb5309f299..855e69dee7db4 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -157,7 +157,7 @@ def test_to_latex_series(self):

     def test_to_latex_midrule_location(self):
         # GH 18326
-        df = pd.DataFrame({"a": [1, 2]})
+        df = DataFrame({"a": [1, 2]})
         df.index.name = "foo"
         result = df.to_latex(index_names=False)
         expected = _dedent(
@@ -373,7 +373,7 @@ def test_to_latex_decimal(self):

 class TestToLatexBold:
     def test_to_latex_bold_rows(self):
         # GH 16707
-        df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+        df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
         result = df.to_latex(bold_rows=True)
         expected = _dedent(
             r"""
@@ -391,7 +391,7 @@ def test_to_latex_bold_rows(self):

     def test_to_latex_no_bold_rows(self):
         # GH 16707
-        df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+        df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
         result = df.to_latex(bold_rows=False)
         expected = _dedent(
             r"""
@@ -572,7 +572,7 @@ def test_to_latex_caption_shortcaption_and_label(
     )
     def test_to_latex_bad_caption_raises(self, bad_caption):
         # test that wrong number of params is raised
-        df = pd.DataFrame({"a": [1]})
+        df = DataFrame({"a": [1]})
         msg = "caption must be either a string or a tuple of two strings"
         with pytest.raises(ValueError, match=msg):
             df.to_latex(caption=bad_caption)
@@ -990,7 +990,7 @@ def multiindex_frame(self):
     @pytest.fixture
     def multicolumn_frame(self):
         """Multicolumn dataframe for testing multicolumn LaTeX macros."""
-        yield pd.DataFrame(
+        yield DataFrame(
             {
                 ("c1", 0): {x: x for x in range(5)},
                 ("c1", 1): {x: x + 5 for x in range(5)},
@@ -1002,7 +1002,7 @@ def multicolumn_frame(self):

     def test_to_latex_multindex_header(self):
         # GH 16718
-        df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
+        df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
         df = df.set_index(["a", "b"])
         observed = df.to_latex(header=["r1", "r2"])
         expected = _dedent(
@@ -1022,7 +1022,7 @@ def test_to_latex_multindex_header(self):
     def test_to_latex_multiindex_empty_name(self):
         # GH 18669
         mi = pd.MultiIndex.from_product([[1, 2]], names=[""])
-        df = pd.DataFrame(-1, index=mi, columns=range(4))
+        df = DataFrame(-1, index=mi, columns=range(4))
         observed = df.to_latex()
         expected = _dedent(
             r"""
@@ -1115,7 +1115,7 @@ def test_to_latex_multicolumn_tabular(self, multiindex_frame):

     def test_to_latex_index_has_name_tabular(self):
         # GH 10660
-        df = pd.DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+        df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
         result = df.set_index(["a", "b"]).to_latex()
         expected = _dedent(
             r"""
@@ -1136,7 +1136,7 @@ def test_to_latex_index_has_name_tabular(self):

     def test_to_latex_groupby_tabular(self):
         # GH 10660
-        df = pd.DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+        df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
         result = df.groupby("a").describe().to_latex()
         expected = _dedent(
             r"""
@@ -1162,7 +1162,7 @@ def test_to_latex_multiindex_dupe_level(self):
         # ONLY happen if all higher order indices (to the left) are
         # equal too. In this test, 'c' has to be printed both times
         # because the higher order index 'A' != 'B'.
-        df = pd.DataFrame(
+        df = DataFrame(
             index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
         )
         result = df.to_latex()
@@ -1275,7 +1275,7 @@ def test_to_latex_multiindex_names(self, name0, name1, axes):
         # GH 18667
         names = [name0, name1]
         mi = pd.MultiIndex.from_product([[1, 2], [3, 4]])
-        df = pd.DataFrame(-1, index=mi.copy(), columns=mi.copy())
+        df = DataFrame(-1, index=mi.copy(), columns=mi.copy())
         for idx in axes:
             df.axes[idx].names = names
@@ -1307,7 +1307,7 @@ def test_to_latex_multiindex_names(self, name0, name1, axes):
     @pytest.mark.parametrize("one_row", [True, False])
     def test_to_latex_multiindex_nans(self, one_row):
         # GH 14249
-        df = pd.DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
+        df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
         if one_row:
             df = df.iloc[[0]]
         observed = df.set_index(["a", "b"]).to_latex()
@@ -1331,7 +1331,7 @@ def test_to_latex_multiindex_nans(self, one_row):

     def test_to_latex_non_string_index(self):
         # GH 19981
-        df = pd.DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
+        df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
         result = df.to_latex()
         expected = _dedent(
             r"""
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 8f1ed193b100f..71698a02285f9 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -240,7 +240,7 @@ def test_build_series(self):

     def test_read_json_from_to_json_results(self):
         # GH32383
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "_id": {"row_0": 0},
                 "category": {"row_0": "Goods"},
@@ -616,13 +616,13 @@ def test_set_names_unset(self, idx, nm, prop):
     )
     def test_warns_non_roundtrippable_names(self, idx):
         # GH 19130
-        df = pd.DataFrame(index=idx)
+        df = DataFrame(index=idx)
         df.index.name = "index"
         with tm.assert_produces_warning():
             set_default_names(df)

     def test_timestamp_in_columns(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             [[1, 2]], columns=[pd.Timestamp("2016"), pd.Timedelta(10, unit="s")]
         )
         result = df.to_json(orient="table")
@@ -634,8 +634,8 @@ def test_timestamp_in_columns(self):
         "case",
         [
             pd.Series([1], index=pd.Index([1], name="a"), name="a"),
-            pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
-            pd.DataFrame(
+            DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
+            DataFrame(
                 {"A": [1]},
                 index=pd.MultiIndex.from_arrays([["a"], [1]], names=["A", "a"]),
             ),
@@ -647,7 +647,7 @@ def test_overlapping_names(self, case):

     def test_mi_falsey_name(self):
         # GH 16203
-        df = pd.DataFrame(
+        df = DataFrame(
             np.random.randn(4, 4),
             index=pd.MultiIndex.from_product([("A", "B"), ("a", "b")]),
         )
@@ -730,7 +730,7 @@ def test_comprehensive(self):
     )
     def test_multiindex(self, index_names):
         # GH 18912
-        df = pd.DataFrame(
+        df = DataFrame(
             [["Arr", "alpha", [1, 2, 3, 4]], ["Bee", "Beta", [10, 20, 30, 40]]],
             index=[["A", "B"], ["Null", "Eins"]],
             columns=["Aussprache", "Griechisch", "Args"],
@@ -742,7 +742,7 @@ def test_multiindex(self, index_names):

     def test_empty_frame_roundtrip(self):
         # GH 21287
-        df = pd.DataFrame(columns=["a", "b", "c"])
+        df = DataFrame(columns=["a", "b", "c"])
         expected = df.copy()
         out = df.to_json(orient="table")
         result = pd.read_json(out, orient="table")
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 4d8d4ecb50a5a..92cc0f969ec87 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -106,7 +106,7 @@ def test_frame_non_unique_columns(self, orient, data):
             df.to_json(orient=orient), orient=orient, convert_dates=["x"]
         )
         if orient == "values":
-            expected = pd.DataFrame(data)
+            expected = DataFrame(data)
             if expected.iloc[:, 0].dtype == "datetime64[ns]":
                 # orient == "values" by default will write Timestamp objects out
                 # in milliseconds; these are internally stored in nanosecond,
@@ -373,7 +373,7 @@ def test_frame_infinity(self, orient, inf, dtype):
         ],
     )
     def test_frame_to_json_float_precision(self, value, precision, expected_val):
-        df = pd.DataFrame([dict(a_float=value)])
+        df = DataFrame([dict(a_float=value)])
         encoded = df.to_json(double_precision=precision)
         assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
@@ -390,7 +390,7 @@ def test_frame_empty(self):
             read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
         )
         # GH 7445
-        result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
+        result = DataFrame({"test": []}, index=[]).to_json(orient="columns")
         expected = '{"test":{}}'
         assert result == expected
@@ -599,7 +599,7 @@ def __str__(self) -> str:

     def test_label_overflow(self):
         # GH14256: buffer length not checked when writing label
-        result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
+        result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
         expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
         assert result == expected
@@ -1143,7 +1143,7 @@ def test_datetime_tz(self):

     def test_sparse(self):
         # GH4377 df.to_json segfaults with non-ndarray blocks
-        df = pd.DataFrame(np.random.randn(10, 4))
+        df = DataFrame(np.random.randn(10, 4))
         df.loc[:8] = np.nan

         sdf = df.astype("Sparse")
@@ -1366,7 +1366,7 @@ def test_from_json_to_json_table_index_and_columns(self, index, columns):

     def test_from_json_to_json_table_dtypes(self):
         # GH21345
-        expected = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
+        expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
         dfjson = expected.to_json(orient="table")
         result = pd.read_json(dfjson, orient="table")
         tm.assert_frame_equal(result, expected)
@@ -1374,7 +1374,7 @@ def test_from_json_to_json_table_dtypes(self):
     @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
     def test_read_json_table_dtype_raises(self, dtype):
         # GH21345
-        df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
+        df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
         dfjson = df.to_json(orient="table")
         msg = "cannot pass both dtype and orient='table'"
         with pytest.raises(ValueError, match=msg):
@@ -1459,7 +1459,7 @@ def test_index_false_error_to_json(self, orient):
         # GH 17394
         # Testing error message from to_json with index=False

-        df = pd.DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
+        df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])

         msg = "'index=False' is only valid when 'orient' is 'split' or 'table'"
         with pytest.raises(ValueError, match=msg):
@@ -1487,7 +1487,7 @@ def test_read_timezone_information(self):
         "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
     )
     def test_timedelta_as_label(self, date_format, key):
-        df = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")])
+        df = DataFrame([[1]], columns=[pd.Timedelta("1D")])
         expected = f'{{"{key}":{{"0":1}}}}'
         result = df.to_json(date_format=date_format)

@@ -1506,14 +1506,14 @@ def test_timedelta_as_label(self, date_format, key):
     )
     def test_tuple_labels(self, orient, expected):
         # GH 20500
-        df = pd.DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
+        df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
         result = df.to_json(orient=orient)
         assert result == expected

     @pytest.mark.parametrize("indent", [1, 2, 4])
     def test_to_json_indent(self, indent):
         # GH 12004
-        df = pd.DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+        df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])

         result = df.to_json(indent=indent)
         spaces = " " * indent
@@ -1649,19 +1649,19 @@ def test_to_json_indent(self, indent):
     )
     def test_json_indent_all_orients(self, orient, expected):
         # GH 12004
-        df = pd.DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+        df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
         result = df.to_json(orient=orient, indent=4)
         assert result == expected

     def test_json_negative_indent_raises(self):
         with pytest.raises(ValueError, match="must be a nonnegative integer"):
-            pd.DataFrame().to_json(indent=-1)
+            DataFrame().to_json(indent=-1)

     def test_emca_262_nan_inf_support(self):
         # GH 12213
         data = '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
         result = pd.read_json(data)
-        expected = pd.DataFrame(
+        expected = DataFrame(
             ["a", np.nan, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
         )
         tm.assert_frame_equal(result, expected)
@@ -1684,7 +1684,7 @@ def test_frame_int_overflow(self):
         "dataframe,expected",
         [
             (
-                pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
+                DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
                 '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'
                 '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',
             )
@@ -1719,12 +1719,12 @@ def test_to_s3(self, s3_resource, s3so):

     def test_json_pandas_na(self):
         # GH 31615
-        result = pd.DataFrame([[pd.NA]]).to_json()
+        result = DataFrame([[pd.NA]]).to_json()
         assert result == '{"0":{"0":null}}'

     def test_json_pandas_nulls(self, nulls_fixture):
         # GH 31615
-        result = pd.DataFrame([[nulls_fixture]]).to_json()
+        result = DataFrame([[nulls_fixture]]).to_json()
         assert result == '{"0":{"0":null}}'

     def test_readjson_bool_series(self):
diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py
index a6ffa7e97d375..933bdc462e3f8 100644
--- a/pandas/tests/io/json/test_readlines.py
+++ b/pandas/tests/io/json/test_readlines.py
@@ -12,7 +12,7 @@

 @pytest.fixture
 def lines_json_df():
-    df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+    df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
     return df.to_json(lines=True, orient="records")

@@ -112,7 +112,7 @@ def test_readjson_each_chunk(lines_json_df):

 def test_readjson_chunks_from_file():
     with tm.ensure_clean("test.json") as path:
-        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
         df.to_json(path, lines=True, orient="records")
         chunked = pd.concat(pd.read_json(path, lines=True, chunksize=1))
         unchunked = pd.read_json(path, lines=True)
@@ -122,7 +122,7 @@ def test_readjson_chunks_from_file():

 @pytest.mark.parametrize("chunksize", [None, 1])
 def test_readjson_chunks_closes(chunksize):
     with tm.ensure_clean("test.json") as path:
-        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
         df.to_json(path, lines=True, orient="records")
         reader = JsonReader(
             path,
@@ -173,7 +173,7 @@ def test_readjson_chunks_multiple_empty_lines(chunksize):
 {"A":3,"B":6}
 """
-    orig = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+    orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
     test = pd.read_json(j, lines=True, chunksize=chunksize)
     if chunksize is not None:
         test = pd.concat(test)
@@ -187,7 +187,7 @@ def test_readjson_unicode(monkeypatch):
         f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}')

     result = read_json(path)
-    expected = pd.DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})
+    expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})
     tm.assert_frame_equal(result, expected)

@@ -200,7 +200,7 @@ def test_readjson_nrows(nrows):
     {"a": 5, "b": 6}
     {"a": 7, "b": 8}"""
     result = pd.read_json(jsonl, lines=True, nrows=nrows)
-    expected = pd.DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
+    expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
     tm.assert_frame_equal(result, expected)

@@ -214,7 +214,7 @@ def test_readjson_nrows_chunks(nrows, chunksize):
     {"a": 7, "b": 8}"""
     reader = read_json(jsonl, lines=True, nrows=nrows, chunksize=chunksize)
     chunked = pd.concat(reader)
-    expected = pd.DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
+    expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
     tm.assert_frame_equal(chunked, expected)

@@ -234,9 +234,9 @@ def test_readjson_lines_chunks_fileurl(datapath):
     # GH 27135
     # Test reading line-format JSON from file url
     df_list_expected = [
-        pd.DataFrame([[1, 2]], columns=["a", "b"], index=[0]),
-        pd.DataFrame([[3, 4]], columns=["a", "b"], index=[1]),
-        pd.DataFrame([[5, 6]], columns=["a", "b"], index=[2]),
+        DataFrame([[1, 2]], columns=["a", "b"], index=[0]),
+        DataFrame([[3, 4]], columns=["a", "b"], index=[1]),
+        DataFrame([[5, 6]], columns=["a", "b"], index=[2]),
     ]
     os_path = datapath("io", "json", "data", "line_delimited.json")
     file_url = Path(os_path).as_uri()
diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py
index 6ac310e3b2227..861aeba60cab7 100644
--- a/pandas/tests/io/parser/test_dtypes.py
+++ b/pandas/tests/io/parser/test_dtypes.py
@@ -577,7 +577,7 @@ def test_boolean_dtype(all_parsers):
     )

     result = parser.read_csv(StringIO(data), dtype="boolean")
-    expected = pd.DataFrame(
+    expected = DataFrame(
         {
             "a": pd.array(
                 [
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index d45317aaa3458..4796cf0b79fae 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -509,7 +509,7 @@ def test_dtype(dtype):
     colspecs = [(0, 5), (5, 10), (10, None)]
     result = read_fwf(StringIO(data), colspecs=colspecs, dtype=dtype)

-    expected = pd.DataFrame(
+    expected = DataFrame(
         {"a": [1, 3], "b": [2, 4], "c": [3.2, 5.2]}, columns=["a", "b", "c"]
     )

@@ -625,7 +625,7 @@ def test_binary_mode():
     """
     data = """aas aas aas
bba bab b a"""
-    df_reference = pd.DataFrame(
+    df_reference = DataFrame(
         [["bba", "bab", "b a"]], columns=["aas", "aas.1", "aas.2"], index=[0]
     )
     with tm.ensure_clean() as path:
@@ -653,5 +653,5 @@ def test_encoding_mmap(memory_map):
         memory_map=memory_map,
     )
     data.seek(0)
-    df_reference = pd.DataFrame([[1, "A", "Ä", 2]])
+    df_reference = DataFrame([[1, "A", "Ä", 2]])
     tm.assert_frame_equal(df, df_reference)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 7eeba97b799ae..ba2805f2f063f 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -64,7 +64,7 @@

 @pytest.mark.single
 class TestHDFStore:
     def test_format_type(self, setup_path):
-        df = pd.DataFrame({"A": [1, 2]})
+        df = DataFrame({"A": [1, 2]})
         with ensure_clean_path(setup_path) as path:
             with HDFStore(path) as store:
                 store.put("a", df, format="fixed")
@@ -300,7 +300,7 @@ def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):

         def create_h5_and_return_checksum(track_times):
             with ensure_clean_path(setup_path) as path:
-                df = pd.DataFrame({"a": [1]})
+                df = DataFrame({"a": [1]})

                 with pd.HDFStore(path, mode="w") as hdf:
                     hdf.put(
@@ -657,10 +657,10 @@ def test_get(self, setup_path):
     def test_walk(self, where, expected, setup_path):
         # GH10143
         objs = {
-            "df1": pd.DataFrame([1, 2, 3]),
-            "df2": pd.DataFrame([4, 5, 6]),
-            "df3": pd.DataFrame([6, 7, 8]),
-            "df4": pd.DataFrame([9, 10, 11]),
+            "df1": DataFrame([1, 2, 3]),
+            "df2": DataFrame([4, 5, 6]),
+            "df3": DataFrame([6, 7, 8]),
+            "df4": DataFrame([9, 10, 11]),
             "s1": Series([10, 9, 8]),
             # Next 3 items aren't pandas objects and should be ignored
             "a1": np.array([[1, 2, 3], [4, 5, 6]]),
@@ -1267,7 +1267,7 @@ def test_append_all_nans(self, setup_path):
     def test_read_missing_key_close_store(self, setup_path):
         # GH 25766
         with ensure_clean_path(setup_path) as path:
-            df = pd.DataFrame({"a": range(2), "b": range(2)})
+            df = DataFrame({"a": range(2), "b": range(2)})
             df.to_hdf(path, "k1")

             with pytest.raises(KeyError, match="'No object named k2 in the file'"):
@@ -1280,7 +1280,7 @@ def test_read_missing_key_close_store(self, setup_path):
     def test_read_missing_key_opened_store(self, setup_path):
         # GH 28699
         with ensure_clean_path(setup_path) as path:
-            df = pd.DataFrame({"a": range(2), "b": range(2)})
+            df = DataFrame({"a": range(2), "b": range(2)})
             df.to_hdf(path, "k1")

             with pd.HDFStore(path, "r") as store:
@@ -1921,7 +1921,7 @@ def test_mi_data_columns(self, setup_path):
         idx = pd.MultiIndex.from_arrays(
             [date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
         )
-        df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
+        df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)

         with ensure_clean_store(setup_path) as store:
             store.append("df", df, data_columns=True)
@@ -2541,7 +2541,7 @@ def test_store_index_name_numpy_str(self, table_format, setup_path):
             pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
             name="rows\u05d0",
         )
-        df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
+        df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)

         # This used to fail, returning numpy strings instead of python strings.
         with ensure_clean_path(setup_path) as path:
@@ -3683,7 +3683,7 @@ def test_append_to_multiple_dropna_false(self, setup_path):

     def test_append_to_multiple_min_itemsize(self, setup_path):
         # GH 11238
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "IX": np.arange(1, 21),
                 "Num": np.arange(1, 21),
@@ -4136,7 +4136,7 @@ def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
             datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
         ) as store:
             result = store.select("df")
-            expected = pd.DataFrame(
+            expected = DataFrame(
                 [[1, 2, 3, "D"]],
                 columns=["A", "B", "C", "D"],
                 index=pd.Index(["ABC"], name="INDEX_NAME"),
@@ -4151,7 +4151,7 @@ def test_legacy_table_fixed_format_read_datetime_py2(self, datapath, setup_path)
             mode="r",
         ) as store:
            result = store.select("df")
-            expected = pd.DataFrame(
+            expected = DataFrame(
                 [[pd.Timestamp("2020-02-06T18:00")]],
                 columns=["A"],
                 index=pd.Index(["date"]),
@@ -4166,7 +4166,7 @@ def test_legacy_table_read_py2(self, datapath, setup_path):
         ) as store:
             result = store.select("table")

-        expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
+        expected = DataFrame({"a": ["a", "b"], "b": [2, 3]})
         tm.assert_frame_equal(expected, result)

     def test_copy(self, setup_path):
@@ -4286,13 +4286,13 @@ def test_unicode_index(self, setup_path):

     def test_unicode_longer_encoded(self, setup_path):
         # GH 11234
         char = "\u0394"
-        df = pd.DataFrame({"A": [char]})
+        df = DataFrame({"A": [char]})

         with ensure_clean_store(setup_path) as store:
             store.put("df", df, format="table", encoding="utf-8")
             result = store.get("df")
             tm.assert_frame_equal(result, df)

-        df = pd.DataFrame({"A": ["a", char], "B": ["b", "b"]})
+        df = DataFrame({"A": ["a", char], "B": ["b", "b"]})
         with ensure_clean_store(setup_path) as store:
             store.put("df", df, format="table", encoding="utf-8")
             result = store.get("df")
@@ -4497,7 +4497,7 @@ def test_categorical_nan_only_columns(self, setup_path):
         # GH18413
         # Check that read_hdf with categorical columns with NaN-only values can
         # be read back.
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "a": ["a", "b", "c", np.nan],
                 "b": [np.nan, np.nan, np.nan, np.nan],
@@ -4734,7 +4734,7 @@ def test_read_from_py_localpath(self, setup_path):

     def test_query_long_float_literal(self, setup_path):
         # GH 14241
-        df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
+        df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})

         with ensure_clean_store(setup_path) as store:
             store.append("test", df, format="table", data_columns=True)
@@ -4755,7 +4755,7 @@ def test_query_long_float_literal(self, setup_path):

     def test_query_compare_column_type(self, setup_path):
         # GH 15492
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "date": ["2014-01-01", "2014-01-02"],
                 "real_date": date_range("2014-01-01", periods=2),
@@ -4824,11 +4824,11 @@ def test_read_py2_hdf_file_in_py3(self, datapath):

         # the file was generated in Python 2.7 like so:
         #
-        # df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(
+        # df = DataFrame([1.,2,3], index=pd.PeriodIndex(
         #     ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
         # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')

-        expected = pd.DataFrame(
+        expected = DataFrame(
             [1.0, 2, 3],
             index=pd.PeriodIndex(["2015-01-01", "2015-01-02", "2015-01-05"], freq="B"),
         )
@@ -4850,7 +4850,7 @@ def test_select_empty_where(self, where):
         # while reading from HDF store raises
         # "SyntaxError: only a single expression is allowed"

-        df = pd.DataFrame([1, 2, 3])
+        df = DataFrame([1, 2, 3])
         with ensure_clean_path("empty_where.h5") as path:
             with pd.HDFStore(path) as store:
                 store.put("df", df, "t")
@@ -4867,7 +4867,7 @@ def test_select_empty_where(self, where):
     def test_to_hdf_multiindex_extension_dtype(self, idx, setup_path):
         # GH 7775
         mi = MultiIndex.from_arrays([idx, idx])
-        df = pd.DataFrame(0, index=mi, columns=["a"])
+        df = DataFrame(0, index=mi, columns=["a"])
         with ensure_clean_path(setup_path) as path:
             with pytest.raises(NotImplementedError, match="Saving a MultiIndex"):
                 df.to_hdf(path, "df")
diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py
index bcc5dcf9f5181..e137bc2dca48e 100644
--- a/pandas/tests/io/pytables/test_timezones.py
+++ b/pandas/tests/io/pytables/test_timezones.py
@@ -213,7 +213,7 @@ def test_append_with_timezones_pytz(setup_path):

 def test_roundtrip_tz_aware_index(setup_path):
     # GH 17618
     time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
-    df = pd.DataFrame(data=[0], index=[time])
+    df = DataFrame(data=[0], index=[time])

     with ensure_clean_store(setup_path) as store:
         store.put("frame", df, format="fixed")
@@ -224,7 +224,7 @@ def test_roundtrip_tz_aware_index(setup_path):

 def test_store_index_name_with_tz(setup_path):
     # GH 13884
-    df = pd.DataFrame({"A": [1, 2]})
+    df = DataFrame({"A": [1, 2]})
     df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
     df.index = df.index.tz_localize("UTC")
     df.index.name = "foo"
@@ -387,7 +387,7 @@ def test_read_with_where_tz_aware_index(setup_path):
     periods = 10
     dts = pd.date_range("20151201", periods=periods, freq="D", tz="UTC")
     mi = pd.MultiIndex.from_arrays([dts, range(periods)], names=["DATE", "NO"])
-    expected = pd.DataFrame({"MYCOL": 0}, index=mi)
+    expected = DataFrame({"MYCOL": 0}, index=mi)

     key = "mykey"
     with ensure_clean_path(setup_path) as path:
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index b627e0e1cad54..a454d3b855cdf 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -38,11 +38,11 @@ def df(request):
     data_type = request.param

     if data_type == "delims":
-        return pd.DataFrame({"a": ['"a,\t"b|c', "d\tef´"], "b": ["hi'j", "k''lm"]})
+        return DataFrame({"a": ['"a,\t"b|c', "d\tef´"], "b": ["hi'j", "k''lm"]})
     elif data_type == "utf8":
-        return pd.DataFrame({"a": ["µasd", "Ωœ∑´"], "b": ["øπ∆˚¬", "œ∑´®"]})
+        return DataFrame({"a": ["µasd", "Ωœ∑´"], "b": ["øπ∆˚¬", "œ∑´®"]})
     elif data_type == "utf16":
-        return pd.DataFrame(
+        return DataFrame(
             {"a": ["\U0001f44d\U0001f44d", "\U0001f44d\U0001f44d"], "b": ["abc", "def"]}
         )
     elif data_type == "string":
@@ -61,7 +61,7 @@ def df(request):
             r_idx_names=[None],
         )
     elif data_type == "nonascii":
-        return pd.DataFrame({"en": "in English".split(), "es": "en español".split()})
+        return DataFrame({"en": "in English".split(), "es": "en español".split()})
    elif data_type == "colwidth":
         _cw = get_option("display.max_colwidth") + 1
         return tm.makeCustomDataframe(
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 32a15e6201037..d6506d434d6a7 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1490,10 +1490,10 @@ def test_datetime_with_timezone_roundtrip(self):

     def test_out_of_bounds_datetime(self):
         # GH 26761
-        data = pd.DataFrame({"date": datetime(9999, 1, 1)}, index=[0])
+        data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0])
         data.to_sql("test_datetime_obb", self.conn, index=False)
         result = sql.read_sql_table("test_datetime_obb", self.conn)
-        expected = pd.DataFrame([pd.NaT], columns=["date"])
+        expected = DataFrame([pd.NaT], columns=["date"])
         tm.assert_frame_equal(result, expected)

     def test_naive_datetimeindex_roundtrip(self):
@@ -1820,7 +1820,7 @@ def main(connectable):

     def test_to_sql_with_negative_npinf(self, input):
         # GH 34431
-        df = pd.DataFrame(input)
+        df = DataFrame(input)

         if self.flavor == "mysql":
             msg = "inf cannot be used with MySQL"
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 30926b2bd0241..d5c2ac755ee4d 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -32,7 +32,7 @@

 @pytest.fixture()
 def mixed_frame():
-    return pd.DataFrame(
+    return DataFrame(
         {
             "a": [1, 2, 3, 4],
             "b": [1.0, 3.0, 27.0, 81.0],
@@ -385,7 +385,7 @@ def test_stata_doc_examples(self):

     def test_write_preserves_original(self):
         # 9795
         np.random.seed(423)
-        df = pd.DataFrame(np.random.randn(5, 4), columns=list("abcd"))
+        df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
         df.loc[2, "a":"c"] = np.nan
         df_copy = df.copy()
         with tm.ensure_clean() as path:
@@ -636,7 +636,7 @@ def test_105(self):
         dpath = os.path.join(self.dirpath, "S4_EDUC1.dta")
         df = pd.read_stata(dpath)
         df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
-        df0 = pd.DataFrame(df0)
+        df0 = DataFrame(df0)
         df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
         df0["clustnum"] = df0["clustnum"].astype(np.int16)
         df0["pri_schl"] = df0["pri_schl"].astype(np.int8)
@@ -1358,7 +1358,7 @@ def test_default_date_conversion(self):
             dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
             dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
         ]
-        original = pd.DataFrame(
+        original = DataFrame(
             {
                 "nums": [1.0, 2.0, 3.0],
                 "strs": ["apple", "banana", "cherry"],
@@ -1381,7 +1381,7 @@ def test_default_date_conversion(self):
             tm.assert_frame_equal(reread, direct)

     def test_unsupported_type(self):
-        original = pd.DataFrame({"a": [1 + 2j, 2 + 4j]})
+        original = DataFrame({"a": [1 + 2j, 2 + 4j]})

         msg = "Data type complex128 not supported"
         with pytest.raises(NotImplementedError, match=msg):
@@ -1394,7 +1394,7 @@ def test_unsupported_datetype(self):
             dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
             dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
         ]
-        original = pd.DataFrame(
+        original = DataFrame(
             {
                 "nums": [1.0, 2.0, 3.0],
                 "strs": ["apple", "banana", "cherry"],
@@ -1408,7 +1408,7 @@ def test_unsupported_datetype(self):
                 original.to_stata(path, convert_dates={"dates": "tC"})

         dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong")
-        original = pd.DataFrame(
+        original = DataFrame(
            {
                 "nums": [1.0, 2.0, 3.0],
                 "strs": ["apple", "banana", "cherry"],
@@ -1439,7 +1439,7 @@ def test_stata_111(self):
         # SAS when exporting to Stata format. We do not know of any
         # on-line documentation for this version.
         df = read_stata(self.dta24_111)
-        original = pd.DataFrame(
+        original = DataFrame(
             {
                 "y": [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
                 "x": [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
@@ -1527,7 +1527,7 @@ def test_pickle_path_localpath(self):
     def test_value_labels_iterator(self, write_index):
         # GH 16923
         d = {"A": ["B", "E", "C", "A", "E"]}
-        df = pd.DataFrame(data=d)
+        df = DataFrame(data=d)
         df["A"] = df["A"].astype("category")
         with tm.ensure_clean() as path:
             df.to_stata(path, write_index=write_index)
@@ -1658,7 +1658,7 @@ def test_invalid_date_conversion(self):
             dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
             dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
         ]
-        original = pd.DataFrame(
+        original = DataFrame(
             {
                 "nums": [1.0, 2.0, 3.0],
                 "strs": ["apple", "banana", "cherry"],
@@ -1709,14 +1709,14 @@ def test_unicode_dta_118(self):
             ["", "", "s", "", "s"],
             ["", "", " ", "", " "],
         ]
-        expected = pd.DataFrame(values, columns=columns)
+        expected = DataFrame(values, columns=columns)

         tm.assert_frame_equal(unicode_df, expected)

     def test_mixed_string_strl(self):
         # GH 23633
         output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}]
-        output = pd.DataFrame(output)
+        output = DataFrame(output)
         output.number = output.number.astype("int32")

         with tm.ensure_clean() as path:
@@ -1737,7 +1737,7 @@ def test_mixed_string_strl(self):
     @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
     def test_all_none_exception(self, version):
         output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
-        output = pd.DataFrame(output)
+        output = DataFrame(output)
         output.loc[:, "none"] = None
         with tm.ensure_clean() as path:
             with pytest.raises(ValueError, match="Column `none` cannot be exported"):
@@ -1791,7 +1791,7 @@ def test_encoding_latin1_118(self):
             assert len(w) == 151
             assert w[0].message.args[0] == msg

-        expected = pd.DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
+        expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
         tm.assert_frame_equal(encoded, expected)

     @pytest.mark.slow
@@ -1808,7 +1808,7 @@ def test_stata_119(self):
     @pytest.mark.parametrize("version", [118, 119, None])
     def test_utf8_writer(self, version):
         cat = pd.Categorical(["a", "β", "ĉ"], ordered=True)
-        data = pd.DataFrame(
+        data = DataFrame(
             [
                 [1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"],
                 [2.0, 2, "ᴮ", ""],
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index e666a8e412a52..ba59fc1a3cc3f 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -617,7 +617,7 @@ def test_subplots_timeseries_y_axis_not_supported(self):
                 pd.to_datetime("2017-08-02 00:00:00"),
             ],
         }
-        testdata = pd.DataFrame(data)
+        testdata = DataFrame(data)
         ax_period = testdata.plot(x="numeric", y="period")
         assert (
             ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
@@ -987,7 +987,7 @@ def test_bar_colors(self):
         tm.close()

     def test_bar_user_colors(self):
-        df = pd.DataFrame(
+        df = DataFrame(
             {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
         )
         # This should *only* work when `y` is specified, else
@@ -1149,13 +1149,13 @@ def test_bar_nan(self):
     @pytest.mark.slow
     def test_bar_categorical(self):
         # GH 13019
-        df1 = pd.DataFrame(
+        df1 = DataFrame(
             np.random.randn(6, 5),
             index=pd.Index(list("ABCDEF")),
             columns=pd.Index(list("abcde")),
         )
         # categorical index must behave the same
-        df2 = pd.DataFrame(
+        df2 = DataFrame(
             np.random.randn(6, 5),
             index=pd.CategoricalIndex(list("ABCDEF")),
             columns=pd.CategoricalIndex(list("abcde")),
@@ -1198,7 +1198,7 @@ def test_plot_scatter(self):

     def test_raise_error_on_datetime_time_data(self):
         # GH 8113, datetime.time type is not supported by matplotlib in scatter
-        df = pd.DataFrame(np.random.randn(10), columns=["a"])
+        df = DataFrame(np.random.randn(10), columns=["a"])
         df["dtime"] = pd.date_range(start="2014-01-01", freq="h", periods=10).time
         msg = "must be a string or a number, not 'datetime.time'"

@@ -1209,19 +1209,19 @@ def test_scatterplot_datetime_data(self):
         # GH 30391
         dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
         vals = np.random.normal(0, 1, len(dates))
-        df = pd.DataFrame({"dates": dates, "vals": vals})
+        df = DataFrame({"dates": dates, "vals": vals})

         _check_plot_works(df.plot.scatter, x="dates", y="vals")
         _check_plot_works(df.plot.scatter, x=0, y=1)

     def test_scatterplot_object_data(self):
         # GH 18755
-        df = pd.DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))
+        df = DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))

         _check_plot_works(df.plot.scatter, x="a", y="b")
         _check_plot_works(df.plot.scatter, x=0, y=1)

-        df = pd.DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))
+        df = DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))

         _check_plot_works(df.plot.scatter, x="a", y="b")
         _check_plot_works(df.plot.scatter, x=0, y=1)
@@ -1232,7 +1232,7 @@ def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
         # interfere with x-axis label and ticklabels with
         # ipython inline backend.
         random_array = np.random.random((1000, 3))
-        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
+        df = DataFrame(random_array, columns=["A label", "B label", "C label"])

         ax1 = df.plot.scatter(x="A label", y="B label")
         ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
@@ -1255,7 +1255,7 @@ def test_if_hexbin_xaxis_label_is_visible(self):
         # interfere with x-axis label and ticklabels with
         # ipython inline backend.
         random_array = np.random.random((1000, 3))
-        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
+        df = DataFrame(random_array, columns=["A label", "B label", "C label"])

         ax = df.plot.hexbin("A label", "B label", gridsize=12)
         assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
@@ -1267,7 +1267,7 @@ def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
         import matplotlib.pyplot as plt

         random_array = np.random.random((1000, 3))
-        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
+        df = DataFrame(random_array, columns=["A label", "B label", "C label"])

         fig, axes = plt.subplots(1, 2)
         df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
@@ -1284,9 +1284,7 @@ def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
     @pytest.mark.slow
     def test_plot_scatter_with_categorical_data(self, x, y):
         # after fixing GH 18755, should be able to plot categorical data
-        df = pd.DataFrame(
-            {"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}
-        )
+        df = DataFrame({"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])})

         _check_plot_works(df.plot.scatter, x=x, y=y)

@@ -1345,7 +1343,7 @@ def test_plot_scatter_with_c(self):
     @pytest.mark.parametrize("cmap", [None, "Greys"])
     def test_scatter_with_c_column_name_with_colors(self, cmap):
         # https://github.com/pandas-dev/pandas/issues/34316
-        df = pd.DataFrame(
+        df = DataFrame(
             [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
             columns=["length", "width"],
         )
@@ -1383,7 +1381,7 @@ def test_scatter_colorbar_different_cmap(self):
         # GH 33389
         import matplotlib.pyplot as plt

-        df = pd.DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
+        df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
         df["x2"] = df["x"] + 1

         fig, ax = plt.subplots()
@@ -1750,7 +1748,7 @@ def test_hist_df(self):
     def test_hist_weights(self, weights):
         # GH 33173
         np.random.seed(0)
-        df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100))))
+        df = DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100))))

         ax1 = _check_plot_works(df.plot, kind="hist", weights=weights)
         ax2 = _check_plot_works(df.plot, kind="hist")
@@ -1991,9 +1989,7 @@ def test_df_legend_labels(self):

     def test_missing_marker_multi_plots_on_same_ax(self):
         # GH 18222
-        df = pd.DataFrame(
-            data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]
-        )
+        df = DataFrame(data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"])
         fig, ax = self.plt.subplots(nrows=1, ncols=3)
         # Left plot
         df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
@@ -2125,7 +2121,7 @@ def test_line_colors(self):
     @pytest.mark.slow
     def test_dont_modify_colors(self):
         colors = ["r", "g", "b"]
-        pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
+        DataFrame(np.random.rand(10, 2)).plot(color=colors)
         assert len(colors) == 3

     @pytest.mark.slow
@@ -3253,7 +3249,7 @@ def test_passed_bar_colors(self):
         color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
         colormap = mpl.colors.ListedColormap(color_tuples)
-        barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
+        barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
         assert color_tuples == [c.get_facecolor() for c in barplot.patches]

     def test_rcParams_bar_colors(self):
@@ -3261,14 +3257,14 @@ def test_rcParams_bar_colors(self):
         color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
         with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}):
-            barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
+            barplot = DataFrame([[1, 2, 3]]).plot(kind="bar")
         assert color_tuples == [c.get_facecolor() for c in barplot.patches]

     @pytest.mark.parametrize("method", ["line", "barh", "bar"])
     def test_secondary_axis_font_size(self, method):
         # GH: 12565
         df = (
-            pd.DataFrame(np.random.randn(15, 2), columns=list("AB"))
+            DataFrame(np.random.randn(15, 2), columns=list("AB"))
             .assign(C=lambda df: df.B.cumsum())
             .assign(D=lambda df: df.C * 1.1)
         )
@@ -3284,7 +3280,7 @@ def test_secondary_axis_font_size(self, method):
     def test_x_string_values_ticks(self):
         # Test if string plot index have a fixed xtick position
         # GH: 7612, GH: 22334
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "sales": [3, 2, 3],
                 "visits": [20, 42, 28],
@@ -3305,7 +3301,7 @@ def test_x_multiindex_values_ticks(self):
         # Test if multiindex plot index have a fixed xtick position
         # GH: 15912
         index = pd.MultiIndex.from_product([[2012, 2013], [1, 2]])
-        df = pd.DataFrame(np.random.randn(4, 2), columns=["A", "B"], index=index)
+        df = DataFrame(np.random.randn(4, 2), columns=["A", "B"], index=index)
         ax = df.plot()
         ax.set_xlim(-1, 4)
         xticklabels = [t.get_text() for t in ax.get_xticklabels()]
@@ -3320,7 +3316,7 @@ def test_x_multiindex_values_ticks(self):
     def test_xlim_plot_line(self, kind):
         # test if xlim is set correctly in plot.line and plot.area
         # GH 27686
-        df = pd.DataFrame([2, 4], index=[1, 2])
+        df = DataFrame([2, 4], index=[1, 2])
         ax = df.plot(kind=kind)
         xlims = ax.get_xlim()
         assert xlims[0] < 1
@@ -3332,7 +3328,7 @@ def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
         fig, ax = self.plt.subplots()

         indexes = ["k1", "k2", "k3", "k4"]
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "s1": [1000, 2000, 1500, 2000],
                 "s2": [900, 1400, 2000, 3000],
@@ -3355,7 +3351,7 @@ def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
     def test_subplots_sharex_false(self):
         # test when sharex is set to False, two plots should have different
         # labels, GH 25160
-        df = pd.DataFrame(np.random.rand(10, 2))
+        df = DataFrame(np.random.rand(10, 2))
         df.iloc[5:, 1] = np.nan
         df.iloc[:5, 0] = np.nan

@@ -3370,7 +3366,7 @@ def test_subplots_sharex_false(self):

     def test_plot_no_rows(self):
         # GH 27758
-        df = pd.DataFrame(columns=["foo"], dtype=int)
+        df = DataFrame(columns=["foo"], dtype=int)
         assert df.empty
         ax = df.plot()
         assert len(ax.get_lines()) == 1
@@ -3379,13 +3375,13 @@ def test_plot_no_rows(self):
         assert len(line.get_ydata()) == 0

     def test_plot_no_numeric_data(self):
-        df = pd.DataFrame(["a", "b", "c"])
+        df = DataFrame(["a", "b", "c"])
         with pytest.raises(TypeError):
             df.plot()

     def test_missing_markers_legend(self):
         # 14958
-        df = pd.DataFrame(np.random.randn(8, 3), columns=["A", "B", "C"])
+        df = DataFrame(np.random.randn(8, 3), columns=["A", "B", "C"])
         ax = df.plot(y=["A"], marker="x", linestyle="solid")
         df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax)
         df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax)
@@ -3395,7 +3391,7 @@ def test_missing_markers_legend(self):

     def test_missing_markers_legend_using_style(self):
         # 14563
-        df = pd.DataFrame(
+        df = DataFrame(
             {
                 "A": [1, 2, 3, 4, 5, 6],
                 "B": [2, 4, 1, 3, 2, 4],
@@ -3414,8 +3410,8 @@ def test_missing_markers_legend_using_style(self):

     def test_colors_of_columns_with_same_name(self):
         # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136
         # Creating a DataFrame with duplicate column labels and testing colors of them.
- df = pd.DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) - df1 = pd.DataFrame({"a": [2, 4, 6]}) + df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + df1 = DataFrame({"a": [2, 4, 6]}) df_concat = pd.concat([df, df1], axis=1) result = df_concat.plot() for legend, line in zip(result.get_legend().legendHandles, result.lines): @@ -3436,7 +3432,7 @@ def test_xlabel_ylabel_dataframe_single_plot( self, kind, index_name, old_label, new_label ): # GH 9093 - df = pd.DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) df.index.name = index_name # default is the ylabel is not shown and xlabel is index name @@ -3463,7 +3459,7 @@ def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel): # GH 37001 xcol = "Type A" ycol = "Type B" - df = pd.DataFrame([[1, 2], [2, 5]], columns=[xcol, ycol]) + df = DataFrame([[1, 2], [2, 5]], columns=[xcol, ycol]) # default is the labels are column names ax = df.plot(kind=kind, x=xcol, y=ycol, xlabel=xlabel, ylabel=ylabel) @@ -3485,7 +3481,7 @@ def test_xlabel_ylabel_dataframe_subplots( self, kind, index_name, old_label, new_label ): # GH 9093 - df = pd.DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) df.index.name = index_name # default is the ylabel is not shown and xlabel is index name diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 02231f0431d9f..cc86436ee8fa9 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -918,7 +918,7 @@ def test_all_any_boolean(self): def test_any_axis1_bool_only(self): # GH#32432 - df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) + df = DataFrame({"A": [True, False], "B": [1, 2]}) result = df.any(axis=1, bool_only=True) expected = Series([True, False]) tm.assert_series_equal(result, expected) @@ -1031,9 +1031,9 @@ def test_minmax_nat_series(self, nat_ser): @pytest.mark.parametrize( "nat_df", [ - pd.DataFrame([pd.NaT, pd.NaT]), - pd.DataFrame([pd.NaT, pd.Timedelta("nat")]), - pd.DataFrame([pd.Timedelta("nat"), pd.Timedelta("nat")]), + DataFrame([pd.NaT, pd.NaT]), + DataFrame([pd.NaT, pd.Timedelta("nat")]), + DataFrame([pd.Timedelta("nat"), pd.Timedelta("nat")]), ], ) def test_minmax_nat_dataframe(self, nat_df): diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 1b9145679fb12..7389fa31109f8 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -3,7 +3,6 @@ import numpy as np import pytest -import pandas as pd from pandas import DataFrame, Series import pandas._testing as tm from pandas.core.groupby.groupby import DataError @@ -158,7 +157,7 @@ def test_resample_count_empty_dataframe(freq, empty_frame_dti): index = _asfreq_compat(empty_frame_dti.index, freq) - expected = pd.DataFrame({"a": []}, dtype="int64", index=index) + expected = DataFrame({"a": []}, dtype="int64", index=index) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 07e47650d0c24..19e5a5dd7f5e7 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1442,14 +1442,14 @@ def test_groupby_with_dst_time_change(): [1478064900001000000, 1480037118776792000], tz="UTC" ).tz_convert("America/Chicago") - df = pd.DataFrame([1, 2], index=index) + df = DataFrame([1, 2], index=index) result = 
df.groupby(pd.Grouper(freq="1d")).last() expected_index_values = pd.date_range( "2016-11-02", "2016-11-24", freq="d", tz="America/Chicago" ) index = pd.DatetimeIndex(expected_index_values) - expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index) + expected = DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index) tm.assert_frame_equal(result, expected) @@ -1586,7 +1586,7 @@ def test_downsample_dst_at_midnight(): index = pd.date_range(start, end, freq="1H") index = index.tz_localize("UTC").tz_convert("America/Havana") data = list(range(len(index))) - dataframe = pd.DataFrame(data, index=index) + dataframe = DataFrame(data, index=index) result = dataframe.groupby(pd.Grouper(freq="1D")).mean() dti = date_range("2018-11-03", periods=3).tz_localize( @@ -1663,7 +1663,7 @@ def f(data, add_arg): tm.assert_series_equal(result, expected) # Testing dataframe - df = pd.DataFrame({"A": 1, "B": 2}, index=pd.date_range("2017", periods=10)) + df = DataFrame({"A": 1, "B": 2}, index=pd.date_range("2017", periods=10)) result = df.groupby("A").resample("D").agg(f, multiplier) expected = df.groupby("A").resample("D").mean().multiply(multiplier) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py index 24695a38a85ac..6523c53cfd2a1 100644 --- a/pandas/tests/resample/test_deprecated.py +++ b/pandas/tests/resample/test_deprecated.py @@ -41,7 +41,7 @@ def test_deprecating_on_loffset_and_base(): # GH 31809 idx = pd.date_range("2001-01-01", periods=4, freq="T") - df = pd.DataFrame(data=4 * [range(2)], index=idx, columns=["a", "b"]) + df = DataFrame(data=4 * [range(2)], index=idx, columns=["a", "b"]) with tm.assert_produces_warning(FutureWarning): pd.Grouper(freq="10s", base=0) diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index f5b655ebd416b..8bdaad285e3f6 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -566,7 +566,7 @@ def test_resample_with_dst_time_change(self): .tz_localize("UTC") .tz_convert("America/Chicago") ) - df = pd.DataFrame([1, 2], index=index) + df = DataFrame([1, 2], index=index) result = df.resample("12h", closed="right", label="right").last().ffill() expected_index_values = [ @@ -588,7 +588,7 @@ def test_resample_with_dst_time_change(self): "America/Chicago" ) index = pd.DatetimeIndex(index, freq="12h") - expected = pd.DataFrame( + expected = DataFrame( [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0], index=index, ) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index dbb85c2f890bf..29f2aea1648ec 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -571,7 +571,7 @@ def test_agg_with_datetime_index_list_agg_func(col_name): # date parser. Some would result in OutOfBoundsError (ValueError) while # others would result in OverflowError when passed into Timestamp. # We catch these errors and move on to the correct branch. 
- df = pd.DataFrame( + df = DataFrame( list(range(200)), index=pd.date_range( start="2017-01-01", freq="15min", periods=200, tz="Europe/Berlin" @@ -579,7 +579,7 @@ def test_agg_with_datetime_index_list_agg_func(col_name): columns=[col_name], ) result = df.resample("1d").aggregate(["mean"]) - expected = pd.DataFrame( + expected = DataFrame( [47.5, 143.5, 195.5], index=pd.date_range( start="2017-01-01", freq="D", periods=3, tz="Europe/Berlin" diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 53966392d3aff..ca31ef684257d 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -126,7 +126,7 @@ def test_getitem_multiple(): def test_groupby_resample_on_api_with_getitem(): # GH 17813 - df = pd.DataFrame( + df = DataFrame( {"id": list("aabbb"), "date": pd.date_range("1-1-2016", periods=5), "data": 1} ) exp = df.set_index("date").groupby("id").resample("2D")["data"].sum() @@ -351,7 +351,7 @@ def test_median_duplicate_columns(): def test_apply_to_one_column_of_df(): # GH: 36951 - df = pd.DataFrame( + df = DataFrame( {"col": range(10), "col1": range(10, 20)}, index=pd.date_range("2012-01-01", periods=10, freq="20min"), ) diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index c8c5fa47706fc..0832724110203 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -293,7 +293,7 @@ def test_groupby_resample_interpolate(): # GH 35325 d = {"price": [10, 11, 9], "volume": [50, 60, 50]} - df = pd.DataFrame(d) + df = DataFrame(d) df["week_starting"] = pd.date_range("01/01/2018", periods=3, freq="W") @@ -324,7 +324,7 @@ def test_groupby_resample_interpolate(): ], names=["volume", "week_starting"], ) - expected = pd.DataFrame( + expected = DataFrame( data={ "price": [ 10.0, diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py index d0a0cf3cacd16..4783d806f8023 100644 --- a/pandas/tests/resample/test_timedelta.py +++ b/pandas/tests/resample/test_timedelta.py @@ -154,13 +154,13 @@ def test_resample_timedelta_edge_case(start, end, freq, resample_freq): def test_resample_with_timedelta_yields_no_empty_groups(): # GH 10603 - df = pd.DataFrame( + df = DataFrame( np.random.normal(size=(10000, 4)), index=pd.timedelta_range(start="0s", periods=10000, freq="3906250n"), ) result = df.loc["1s":, :].resample("3s").apply(lambda x: len(x)) - expected = pd.DataFrame( + expected = DataFrame( [[768.0] * 4] * 12 + [[528.0] * 4], index=pd.timedelta_range(start="1s", periods=13, freq="3s"), ) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 4cc72e66353b3..8108cd14b872a 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -743,7 +743,7 @@ def test_join_multi_to_multi(self, join_type): def test_join_on_tz_aware_datetimeindex(self): # GH 23931, 26335 - df1 = pd.DataFrame( + df1 = DataFrame( { "date": pd.date_range( start="2018-01-01", periods=5, tz="America/Chicago" @@ -752,7 +752,7 @@ def test_join_on_tz_aware_datetimeindex(self): } ) - df2 = pd.DataFrame( + df2 = DataFrame( { "date": pd.date_range( start="2018-01-03", periods=5, tz="America/Chicago" diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 6968dc781b6e3..7d701d26185f1 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ 
b/pandas/tests/reshape/merge/test_merge.py @@ -122,10 +122,10 @@ def setup_method(self, method): def test_merge_inner_join_empty(self): # GH 15328 - df_empty = pd.DataFrame() - df_a = pd.DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") + df_empty = DataFrame() + df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") result = pd.merge(df_empty, df_a, left_index=True, right_index=True) - expected = pd.DataFrame({"a": []}, index=[], dtype="int64") + expected = DataFrame({"a": []}, index=[], dtype="int64") tm.assert_frame_equal(result, expected) def test_merge_common(self): @@ -136,7 +136,7 @@ def test_merge_common(self): def test_merge_non_string_columns(self): # https://github.com/pandas-dev/pandas/issues/17962 # Checks that method runs for non string column names - left = pd.DataFrame( + left = DataFrame( {0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]} ) @@ -430,10 +430,10 @@ def test_left_merge_empty_dataframe(self): ) def test_merge_left_empty_right_empty(self, join_type, kwarg): # GH 10824 - left = pd.DataFrame(columns=["a", "b", "c"]) - right = pd.DataFrame(columns=["x", "y", "z"]) + left = DataFrame(columns=["a", "b", "c"]) + right = DataFrame(columns=["x", "y", "z"]) - exp_in = pd.DataFrame( + exp_in = DataFrame( columns=["a", "b", "c", "x", "y", "z"], index=pd.Index([], dtype=object), dtype=object, @@ -444,10 +444,10 @@ def test_merge_left_empty_right_empty(self, join_type, kwarg): def test_merge_left_empty_right_notempty(self): # GH 10824 - left = pd.DataFrame(columns=["a", "b", "c"]) - right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"]) + left = DataFrame(columns=["a", "b", "c"]) + right = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"]) - exp_out = pd.DataFrame( + exp_out = DataFrame( { "a": np.array([np.nan] * 3, dtype=object), "b": np.array([np.nan] * 3, dtype=object), @@ -493,10 +493,10 @@ def check2(exp, kwarg): def test_merge_left_notempty_right_empty(self): # GH 10824 - left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]) - right = pd.DataFrame(columns=["x", "y", "z"]) + left = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]) + right = DataFrame(columns=["x", "y", "z"]) - exp_out = pd.DataFrame( + exp_out = DataFrame( { "a": [1, 4, 7], "b": [2, 5, 8], @@ -534,12 +534,12 @@ def check2(exp, kwarg): def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2): # GH 25183 - df = pd.DataFrame( + df = DataFrame( {"key": series_of_dtype, "value": series_of_dtype2}, columns=["key", "value"], ) df_empty = df[:0] - expected = pd.DataFrame( + expected = DataFrame( { "value_x": Series(dtype=df.dtypes["value"]), "key": Series(dtype=df.dtypes["key"]), @@ -552,15 +552,15 @@ def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2): def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na): # GH 25183 - df_left = pd.DataFrame( + df_left = DataFrame( {"key": series_of_dtype, "value": series_of_dtype_all_na}, columns=["key", "value"], ) - df_right = pd.DataFrame( + df_right = DataFrame( {"key": series_of_dtype, "value": series_of_dtype_all_na}, columns=["key", "value"], ) - expected = pd.DataFrame( + expected = DataFrame( { "key": series_of_dtype, "value_x": series_of_dtype_all_na, @@ -675,7 +675,7 @@ def test_join_append_timedeltas(self): def test_other_datetime_unit(self): # GH 13389 - df1 = pd.DataFrame({"entity_id": [101, 102]}) + df1 = DataFrame({"entity_id": [101, 102]}) s = Series([None, None], index=[101, 102], 
name="days") for dtype in [ @@ -694,7 +694,7 @@ def test_other_datetime_unit(self): result = df1.merge(df2, left_on="entity_id", right_index=True) - exp = pd.DataFrame( + exp = DataFrame( { "entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype="datetime64[ns]"), @@ -706,7 +706,7 @@ def test_other_datetime_unit(self): @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) def test_other_timedelta_unit(self, unit): # GH 13389 - df1 = pd.DataFrame({"entity_id": [101, 102]}) + df1 = DataFrame({"entity_id": [101, 102]}) s = Series([None, None], index=[101, 102], name="days") dtype = f"m8[{unit}]" @@ -715,7 +715,7 @@ def test_other_timedelta_unit(self, unit): result = df1.merge(df2, left_on="entity_id", right_index=True) - exp = pd.DataFrame( + exp = DataFrame( {"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)}, columns=["entity_id", "days"], ) @@ -748,13 +748,13 @@ def test_overlapping_columns_error_message(self): def test_merge_on_datetime64tz(self): # GH11405 - left = pd.DataFrame( + left = DataFrame( { "key": pd.date_range("20151010", periods=2, tz="US/Eastern"), "value": [1, 2], } ) - right = pd.DataFrame( + right = DataFrame( { "key": pd.date_range("20151011", periods=3, tz="US/Eastern"), "value": [1, 2, 3], @@ -771,13 +771,13 @@ def test_merge_on_datetime64tz(self): result = pd.merge(left, right, on="key", how="outer") tm.assert_frame_equal(result, expected) - left = pd.DataFrame( + left = DataFrame( { "key": [1, 2], "value": pd.date_range("20151010", periods=2, tz="US/Eastern"), } ) - right = pd.DataFrame( + right = DataFrame( { "key": [2, 3], "value": pd.date_range("20151011", periods=2, tz="US/Eastern"), @@ -800,7 +800,7 @@ def test_merge_on_datetime64tz(self): def test_merge_on_datetime64tz_empty(self): # https://github.com/pandas-dev/pandas/issues/25014 dtz = pd.DatetimeTZDtype(tz="UTC") - right = pd.DataFrame( + right = DataFrame( { "date": [pd.Timestamp("2018", tz=dtz.tz)], "value": [4.0], @@ -810,7 +810,7 @@ def test_merge_on_datetime64tz_empty(self): ) left = right[:0] result = left.merge(right, on="date") - expected = pd.DataFrame( + expected = DataFrame( { "value_x": Series(dtype=float), "date2_x": Series(dtype=dtz), @@ -824,12 +824,12 @@ def test_merge_on_datetime64tz_empty(self): def test_merge_datetime64tz_with_dst_transition(self): # GH 18885 - df1 = pd.DataFrame( + df1 = DataFrame( pd.date_range("2017-10-29 01:00", periods=4, freq="H", tz="Europe/Madrid"), columns=["date"], ) df1["value"] = 1 - df2 = pd.DataFrame( + df2 = DataFrame( { "date": pd.to_datetime( [ @@ -843,7 +843,7 @@ def test_merge_datetime64tz_with_dst_transition(self): ) df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid") result = pd.merge(df1, df2, how="outer", on="date") - expected = pd.DataFrame( + expected = DataFrame( { "date": pd.date_range( "2017-10-29 01:00", periods=7, freq="H", tz="Europe/Madrid" @@ -868,10 +868,10 @@ def test_merge_non_unique_period_index(self): tm.assert_frame_equal(result, expected) def test_merge_on_periods(self): - left = pd.DataFrame( + left = DataFrame( {"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]} ) - right = pd.DataFrame( + right = DataFrame( { "key": pd.period_range("20151011", periods=3, freq="D"), "value": [1, 2, 3], @@ -888,10 +888,10 @@ def test_merge_on_periods(self): result = pd.merge(left, right, on="key", how="outer") tm.assert_frame_equal(result, expected) - left = pd.DataFrame( + left = DataFrame( {"key": [1, 2], "value": pd.period_range("20151010", 
periods=2, freq="D")} ) - right = pd.DataFrame( + right = DataFrame( {"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")} ) @@ -1132,7 +1132,7 @@ def test_validation(self): tm.assert_frame_equal(result, expected_3) # Dups on right - right_w_dups = right.append(pd.DataFrame({"a": ["e"], "c": ["moo"]}, index=[4])) + right_w_dups = right.append(DataFrame({"a": ["e"], "c": ["moo"]}, index=[4])) merge( left, right_w_dups, @@ -1156,7 +1156,7 @@ def test_validation(self): # Dups on left left_w_dups = left.append( - pd.DataFrame({"a": ["a"], "c": ["cow"]}, index=[3]), sort=True + DataFrame({"a": ["a"], "c": ["cow"]}, index=[3]), sort=True ) merge( left_w_dups, @@ -1242,7 +1242,7 @@ def test_validation(self): def test_merge_two_empty_df_no_division_error(self): # GH17776, PR #17846 - a = pd.DataFrame({"a": [], "b": [], "c": []}) + a = DataFrame({"a": [], "b": [], "c": []}) with np.errstate(divide="raise"): merge(a, a, on=("a", "b")) @@ -1285,10 +1285,10 @@ def test_merge_on_index_with_more_values(self, how, index, expected_index): # GH 24212 # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that # -1 is interpreted as a missing value instead of the last element - df1 = pd.DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index) - df2 = pd.DataFrame({"b": [0, 1, 2, 3, 4, 5]}) + df1 = DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index) + df2 = DataFrame({"b": [0, 1, 2, 3, 4, 5]}) result = df1.merge(df2, left_on="key", right_index=True, how=how) - expected = pd.DataFrame( + expected = DataFrame( [ [0, 0, 0], [1, 1, 1], @@ -1306,10 +1306,10 @@ def test_merge_right_index_right(self): # Note: the expected output here is probably incorrect. # See https://github.com/pandas-dev/pandas/issues/17257 for more. # We include this as a regression test for GH-24897. 
- left = pd.DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]}) - right = pd.DataFrame({"b": [1, 2, 3]}) + left = DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]}) + right = DataFrame({"b": [1, 2, 3]}) - expected = pd.DataFrame( + expected = DataFrame( {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]}, columns=["a", "key", "b"], index=[0, 1, 2, np.nan], @@ -1320,30 +1320,26 @@ def test_merge_right_index_right(self): @pytest.mark.parametrize("how", ["left", "right"]) def test_merge_preserves_row_order(self, how): # GH 27453 - left_df = pd.DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) - right_df = pd.DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) + left_df = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) + right_df = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) result = left_df.merge(right_df, on=["animal", "max_speed"], how=how) if how == "right": - expected = pd.DataFrame( - {"animal": ["quetzal", "pig"], "max_speed": [80, 11]} - ) + expected = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) else: - expected = pd.DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) + expected = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) tm.assert_frame_equal(result, expected) def test_merge_take_missing_values_from_index_of_other_dtype(self): # GH 24212 - left = pd.DataFrame( + left = DataFrame( { "a": [1, 2, 3], "key": pd.Categorical(["a", "a", "b"], categories=list("abc")), } ) - right = pd.DataFrame( - {"b": [1, 2, 3]}, index=pd.CategoricalIndex(["a", "b", "c"]) - ) + right = DataFrame({"b": [1, 2, 3]}, index=pd.CategoricalIndex(["a", "b", "c"])) result = left.merge(right, left_on="key", right_index=True, how="right") - expected = pd.DataFrame( + expected = DataFrame( { "a": [1, 2, 3, None], "key": pd.Categorical(["a", "a", "b", "c"]), @@ -1356,10 +1352,10 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self): def test_merge_readonly(self): # https://github.com/pandas-dev/pandas/issues/27943 - data1 = pd.DataFrame( + data1 = DataFrame( np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"] ) - data2 = pd.DataFrame( + data2 = DataFrame( np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"] ) @@ -1743,7 +1739,7 @@ def test_self_join_multiple_categories(self): # GH 16767 # non-duplicates should work with multiple categories m = 5 - df = pd.DataFrame( + df = DataFrame( { "a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m, "b": ["t", "w", "x", "y", "z"] * 2 * m, @@ -1783,17 +1779,17 @@ def test_dtype_on_categorical_dates(self): # GH 16900 # dates should not be coerced to ints - df = pd.DataFrame( + df = DataFrame( [[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"] ) df["date"] = df["date"].astype("category") - df2 = pd.DataFrame( + df2 = DataFrame( [[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"] ) df2["date"] = df2["date"].astype("category") - expected_outer = pd.DataFrame( + expected_outer = DataFrame( [ [pd.Timestamp("2001-01-01"), 1.1, 1.3], [pd.Timestamp("2001-01-02"), 1.3, np.nan], @@ -1804,7 +1800,7 @@ def test_dtype_on_categorical_dates(self): result_outer = pd.merge(df, df2, how="outer", on=["date"]) tm.assert_frame_equal(result_outer, expected_outer) - expected_inner = pd.DataFrame( + expected_inner = DataFrame( [[pd.Timestamp("2001-01-01"), 1.1, 1.3]], columns=["date", "num2", "num4"] ) result_inner = pd.merge(df, df2, how="inner", on=["date"]) @@ -1824,21 +1820,19 @@ def 
test_merging_with_bool_or_int_cateorical_column( ): # GH 17187 # merging with a boolean/int categorical column - df1 = pd.DataFrame({"id": [1, 2, 3, 4], "cat": category_column}) + df1 = DataFrame({"id": [1, 2, 3, 4], "cat": category_column}) df1["cat"] = df1["cat"].astype(CDT(categories, ordered=ordered)) - df2 = pd.DataFrame({"id": [2, 4], "num": [1, 9]}) + df2 = DataFrame({"id": [2, 4], "num": [1, 9]}) result = df1.merge(df2) - expected = pd.DataFrame( - {"id": [2, 4], "cat": expected_categories, "num": [1, 9]} - ) + expected = DataFrame({"id": [2, 4], "cat": expected_categories, "num": [1, 9]}) expected["cat"] = expected["cat"].astype(CDT(categories, ordered=ordered)) tm.assert_frame_equal(expected, result) def test_merge_on_int_array(self): # GH 23020 - df = pd.DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1}) + df = DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1}) result = pd.merge(df, df, on="A") - expected = pd.DataFrame( + expected = DataFrame( {"A": Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1} ) tm.assert_frame_equal(result, expected) @@ -1950,7 +1944,7 @@ def test_merge_index_types(index): ) def test_merge_series(on, left_on, right_on, left_index, right_index, nm): # GH 21220 - a = pd.DataFrame( + a = DataFrame( {"A": [1, 2, 3, 4]}, index=pd.MultiIndex.from_product( [["a", "b"], [0, 1]], names=["outer", "inner"] @@ -1963,7 +1957,7 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): ), name=nm, ) - expected = pd.DataFrame( + expected = DataFrame( {"A": [2, 4], "B": [1, 3]}, index=pd.MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]), ) @@ -2012,10 +2006,10 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): ) def test_merge_suffix(col1, col2, kwargs, expected_cols): # issue: 24782 - a = pd.DataFrame({col1: [1, 2, 3]}) - b = pd.DataFrame({col2: [4, 5, 6]}) + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [4, 5, 6]}) - expected = pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols) + expected = DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols) result = a.merge(b, left_index=True, right_index=True, **kwargs) tm.assert_frame_equal(result, expected) @@ -2060,8 +2054,8 @@ def test_merge_duplicate_suffix(how, expected): ) def test_merge_suffix_error(col1, col2, suffixes): # issue: 24782 - a = pd.DataFrame({col1: [1, 2, 3]}) - b = pd.DataFrame({col2: [3, 4, 5]}) + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [3, 4, 5]}) # TODO: might reconsider current raise behaviour, see issue 24782 msg = "columns overlap but no suffix specified" @@ -2071,8 +2065,8 @@ def test_merge_suffix_error(col1, col2, suffixes): @pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}]) def test_merge_suffix_warns(suffixes): - a = pd.DataFrame({"a": [1, 2, 3]}) - b = pd.DataFrame({"b": [3, 4, 5]}) + a = DataFrame({"a": [1, 2, 3]}) + b = DataFrame({"b": [3, 4, 5]}) with tm.assert_produces_warning(FutureWarning): pd.merge(a, b, left_index=True, right_index=True, suffixes={"left", "right"}) @@ -2086,8 +2080,8 @@ def test_merge_suffix_warns(suffixes): ], ) def test_merge_suffix_length_error(col1, col2, suffixes, msg): - a = pd.DataFrame({col1: [1, 2, 3]}) - b = pd.DataFrame({col2: [3, 4, 5]}) + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [3, 4, 5]}) with pytest.raises(ValueError, match=msg): pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes) @@ -2176,9 +2170,9 @@ def test_merge_multiindex_columns(): numbers = ["1", 
"2", "3"] index = pd.MultiIndex.from_product((letters, numbers), names=["outer", "inner"]) - frame_x = pd.DataFrame(columns=index) + frame_x = DataFrame(columns=index) frame_x["id"] = "" - frame_y = pd.DataFrame(columns=index) + frame_y = DataFrame(columns=index) frame_y["id"] = "" l_suf = "_x" @@ -2190,7 +2184,7 @@ def test_merge_multiindex_columns(): expected_index = pd.MultiIndex.from_product( [expected_labels, numbers], names=["outer", "inner"] ) - expected = pd.DataFrame(columns=expected_index) + expected = DataFrame(columns=expected_index) expected["id"] = "" tm.assert_frame_equal(result, expected) @@ -2198,12 +2192,12 @@ def test_merge_multiindex_columns(): def test_merge_datetime_upcast_dtype(): # https://github.com/pandas-dev/pandas/issues/31208 - df1 = pd.DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]}) - df2 = pd.DataFrame( + df1 = DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]}) + df2 = DataFrame( {"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])} ) result = pd.merge(df1, df2, how="left", on="y") - expected = pd.DataFrame( + expected = DataFrame( { "x": ["a", "b", "c"], "y": ["1", "2", "4"], diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py index e0063925a03e1..17f2f44f45fce 100644 --- a/pandas/tests/reshape/merge/test_merge_ordered.py +++ b/pandas/tests/reshape/merge/test_merge_ordered.py @@ -88,9 +88,9 @@ def test_empty_sequence_concat(self): with pytest.raises(ValueError, match=pattern): pd.concat(df_seq) - pd.concat([pd.DataFrame()]) - pd.concat([None, pd.DataFrame()]) - pd.concat([pd.DataFrame(), None]) + pd.concat([DataFrame()]) + pd.concat([None, DataFrame()]) + pd.concat([DataFrame(), None]) def test_doc_example(self): left = DataFrame( diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index 61fdafa0c6db2..68096192c51ea 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -200,11 +200,9 @@ def test_merge_multiple_cols_with_mixed_cols_index(self): pd.MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]), name="Amount", ) - df = pd.DataFrame( - {"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0} - ) + df = DataFrame({"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0}) result = pd.merge(df, s.reset_index(), on=["lev1", "lev2"]) - expected = pd.DataFrame( + expected = DataFrame( { "lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], @@ -801,7 +799,7 @@ def test_single_common_level(self): [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"] ) - left = pd.DataFrame( + left = DataFrame( {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left ) @@ -809,7 +807,7 @@ def test_single_common_level(self): [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"] ) - right = pd.DataFrame( + right = DataFrame( {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, index=index_right, ) @@ -828,12 +826,12 @@ def test_join_multi_wrong_order(self): midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) midx3 = pd.MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) - left = pd.DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) - right = pd.DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) + left = DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) + right = DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) result = left.join(right) - expected = pd.DataFrame( 
+ expected = DataFrame( index=midx1, data={"x": [10, 20, 30, 40], "y": ["fing", "foo", "bar", np.nan]}, ) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 340b50ed60ceb..a5b862adc8768 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -344,14 +344,14 @@ def test_concatlike_datetimetz_short(self, tz): # GH#7795 ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz) ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz) - df1 = pd.DataFrame(0, index=ix1, columns=["A", "B"]) - df2 = pd.DataFrame(0, index=ix2, columns=["A", "B"]) + df1 = DataFrame(0, index=ix1, columns=["A", "B"]) + df2 = DataFrame(0, index=ix2, columns=["A", "B"]) exp_idx = pd.DatetimeIndex( ["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"], tz=tz, ) - exp = pd.DataFrame(0, index=exp_idx, columns=["A", "B"]) + exp = DataFrame(0, index=exp_idx, columns=["A", "B"]) tm.assert_frame_equal(df1.append(df2), exp) tm.assert_frame_equal(pd.concat([df1, df2]), exp) @@ -849,14 +849,14 @@ def test_append_records(self): # rewrite sort fixture, since we also want to test default of None def test_append_sorts(self, sort): - df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) - df2 = pd.DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3]) + df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) + df2 = DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3]) with tm.assert_produces_warning(None): result = df1.append(df2, sort=sort) # for None / True - expected = pd.DataFrame( + expected = DataFrame( {"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]}, columns=["a", "b", "c"], ) @@ -937,11 +937,11 @@ def test_append_same_columns_type(self, index): # GH18359 # df wider than ser - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index) ser_index = index[:2] ser = Series([7, 8], index=ser_index, name=2) result = df.append(ser) - expected = pd.DataFrame( + expected = DataFrame( [[1.0, 2.0, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index ) tm.assert_frame_equal(result, expected) @@ -949,10 +949,10 @@ def test_append_same_columns_type(self, index): # ser wider than df ser_index = index index = index[:2] - df = pd.DataFrame([[1, 2], [4, 5]], columns=index) + df = DataFrame([[1, 2], [4, 5]], columns=index) ser = Series([7, 8, 9], index=ser_index, name=2) result = df.append(ser) - expected = pd.DataFrame( + expected = DataFrame( [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]], index=[0, 1, 2], columns=ser_index, @@ -969,13 +969,13 @@ def test_append_different_columns_types(self, df_columns, series_index): # See also test 'test_append_different_columns_types_raises' below # for errors raised when appending - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns) ser = Series([7, 8, 9], index=series_index, name=2) result = df.append(ser) idx_diff = ser.index.difference(df_columns) combined_columns = Index(df_columns.tolist()).append(idx_diff) - expected = pd.DataFrame( + expected = DataFrame( [ [1.0, 2.0, 3.0, np.nan, np.nan, np.nan], [4, 5, 6, np.nan, np.nan, np.nan], @@ -1004,7 +1004,7 @@ def test_append_different_columns_types_raises( # See also test 'test_append_different_columns_types' above for # appending without raising. 
- df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append) ser = Series([7, 8, 9], index=index_cannot_append_with_other, name=2) msg = ( r"Expected tuple, got (int|long|float|str|" @@ -1015,9 +1015,7 @@ def test_append_different_columns_types_raises( with pytest.raises(TypeError, match=msg): df.append(ser) - df = pd.DataFrame( - [[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other - ) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other) ser = Series([7, 8, 9], index=index_can_append, name=2) with pytest.raises(TypeError, match=msg): @@ -1112,19 +1110,19 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): def test_append_empty_tz_frame_with_datetime64ns(self): # https://github.com/pandas-dev/pandas/issues/35460 - df = pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") # pd.NaT gets inferred as tz-naive, so append result is tz-naive result = df.append({"a": pd.NaT}, ignore_index=True) - expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") + expected = DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") tm.assert_frame_equal(result, expected) # also test with typed value to append - df = pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") result = df.append( Series({"a": pd.NaT}, dtype="datetime64[ns]"), ignore_index=True ) - expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") + expected = DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") tm.assert_frame_equal(result, expected) @@ -1316,13 +1314,12 @@ def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out): pd.Index(["c", "d", "e"], name=name_in3), ] frames = [ - pd.DataFrame({c: [0, 1, 2]}, index=i) - for i, c in zip(indices, ["x", "y", "z"]) + DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"]) ] result = pd.concat(frames, axis=1) exp_ind = pd.Index(["a", "b", "c", "d", "e"], name=name_out) - expected = pd.DataFrame( + expected = DataFrame( { "x": [0, 1, 2, np.nan, np.nan], "y": [np.nan, 0, 1, 2, np.nan], @@ -1383,15 +1380,13 @@ def test_concat_multiindex_with_tz(self): def test_concat_multiindex_with_none_in_index_names(self): # GH 15787 index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None]) - df = pd.DataFrame({"col": range(5)}, index=index, dtype=np.int32) + df = DataFrame({"col": range(5)}, index=index, dtype=np.int32) result = concat([df, df], keys=[1, 2], names=["level2"]) index = pd.MultiIndex.from_product( [[1, 2], [1], range(5)], names=["level2", "level1", None] ) - expected = pd.DataFrame( - {"col": list(range(5)) * 2}, index=index, dtype=np.int32 - ) + expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) result = concat([df, df[:2]], keys=[1, 2], names=["level2"]) @@ -1400,7 +1395,7 @@ def test_concat_multiindex_with_none_in_index_names(self): no_name = list(range(5)) + list(range(2)) tuples = list(zip(level2, level1, no_name)) index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) - expected = pd.DataFrame({"col": no_name}, index=index, dtype=np.int32) + expected = DataFrame({"col": no_name}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) def test_concat_keys_and_levels(self): @@ -1876,9 +1871,9 @@ def test_concat_bug_3602(self): def 
test_concat_inner_join_empty(self): # GH 15328 - df_empty = pd.DataFrame() - df_a = pd.DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") - df_expected = pd.DataFrame({"a": []}, index=[], dtype="int64") + df_empty = DataFrame() + df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") + df_expected = DataFrame({"a": []}, index=[], dtype="int64") for how, expected in [("inner", df_expected), ("outer", df_a)]: result = pd.concat([df_a, df_empty], axis=1, join=how) @@ -2029,40 +2024,40 @@ def test_concat_tz_series(self): # see gh-12217 and gh-12306 # Concatenating two UTC times - first = pd.DataFrame([[datetime(2016, 1, 1)]]) + first = DataFrame([[datetime(2016, 1, 1)]]) first[0] = first[0].dt.tz_localize("UTC") - second = pd.DataFrame([[datetime(2016, 1, 2)]]) + second = DataFrame([[datetime(2016, 1, 2)]]) second[0] = second[0].dt.tz_localize("UTC") result = pd.concat([first, second]) assert result[0].dtype == "datetime64[ns, UTC]" # Concatenating two London times - first = pd.DataFrame([[datetime(2016, 1, 1)]]) + first = DataFrame([[datetime(2016, 1, 1)]]) first[0] = first[0].dt.tz_localize("Europe/London") - second = pd.DataFrame([[datetime(2016, 1, 2)]]) + second = DataFrame([[datetime(2016, 1, 2)]]) second[0] = second[0].dt.tz_localize("Europe/London") result = pd.concat([first, second]) assert result[0].dtype == "datetime64[ns, Europe/London]" # Concatenating 2+1 London times - first = pd.DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]]) + first = DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]]) first[0] = first[0].dt.tz_localize("Europe/London") - second = pd.DataFrame([[datetime(2016, 1, 3)]]) + second = DataFrame([[datetime(2016, 1, 3)]]) second[0] = second[0].dt.tz_localize("Europe/London") result = pd.concat([first, second]) assert result[0].dtype == "datetime64[ns, Europe/London]" # Concat'ing 1+2 London times - first = pd.DataFrame([[datetime(2016, 1, 1)]]) + first = DataFrame([[datetime(2016, 1, 1)]]) first[0] = first[0].dt.tz_localize("Europe/London") - second = pd.DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]]) + second = DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]]) second[0] = second[0].dt.tz_localize("Europe/London") result = pd.concat([first, second]) @@ -2105,13 +2100,11 @@ def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s): # GH 12396 # tz-naive - first = pd.DataFrame([[pd.NaT], [pd.NaT]]).apply( - lambda x: x.dt.tz_localize(tz1) - ) - second = pd.DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2)) + first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1)) + second = DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2)) result = pd.concat([first, second], axis=0) - expected = pd.DataFrame(Series([pd.NaT, pd.NaT, s], index=[0, 1, 0])) + expected = DataFrame(Series([pd.NaT, pd.NaT, s], index=[0, 1, 0])) expected = expected.apply(lambda x: x.dt.tz_localize(tz2)) if tz1 != tz2: expected = expected.astype(object) @@ -2123,9 +2116,9 @@ def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s): def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2): # GH 12396 - first = pd.DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)) - second = pd.DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1]) - expected = pd.DataFrame( + first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)) + second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1]) + expected = DataFrame( { 0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1), 1: Series([pd.NaT, 
pd.NaT]).dt.tz_localize(tz2), @@ -2141,7 +2134,7 @@ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): # tz-naive first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1) - second = pd.DataFrame( + second = DataFrame( [ [pd.Timestamp("2015/01/01", tz=tz2)], [pd.Timestamp("2016/01/01", tz=tz2)], @@ -2149,7 +2142,7 @@ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): index=[2, 3], ) - expected = pd.DataFrame( + expected = DataFrame( [ pd.NaT, pd.NaT, @@ -2167,13 +2160,13 @@ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): def test_concat_NaT_dataframes(self, tz): # GH 12396 - first = pd.DataFrame([[pd.NaT], [pd.NaT]]) + first = DataFrame([[pd.NaT], [pd.NaT]]) first = first.apply(lambda x: x.dt.tz_localize(tz)) - second = pd.DataFrame( + second = DataFrame( [[pd.Timestamp("2015/01/01", tz=tz)], [pd.Timestamp("2016/01/01", tz=tz)]], index=[2, 3], ) - expected = pd.DataFrame( + expected = DataFrame( [ pd.NaT, pd.NaT, @@ -2228,7 +2221,7 @@ def test_concat_empty_series(self): s1 = Series([1, 2, 3], name="x") s2 = Series(name="y", dtype="float64") res = pd.concat([s1, s2], axis=1) - exp = pd.DataFrame( + exp = DataFrame( {"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]}, index=pd.Index([0, 1, 2], dtype="O"), ) @@ -2245,7 +2238,7 @@ def test_concat_empty_series(self): s1 = Series([1, 2, 3], name="x") s2 = Series(name=None, dtype="float64") res = pd.concat([s1, s2], axis=1) - exp = pd.DataFrame( + exp = DataFrame( {"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]}, columns=["x", 0], index=pd.Index([0, 1, 2], dtype="O"), @@ -2276,7 +2269,7 @@ def test_default_index(self): s2 = Series([4, 5, 6], name="y") res = pd.concat([s1, s2], axis=1, ignore_index=True) assert isinstance(res.columns, pd.RangeIndex) - exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]]) + exp = DataFrame([[1, 4], [2, 5], [3, 6]]) # use check_index_type=True to check the result have # RangeIndex (default index) tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) @@ -2286,20 +2279,20 @@ def test_default_index(self): s2 = Series([4, 5, 6]) res = pd.concat([s1, s2], axis=1, ignore_index=False) assert isinstance(res.columns, pd.RangeIndex) - exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]]) + exp = DataFrame([[1, 4], [2, 5], [3, 6]]) exp.columns = pd.RangeIndex(2) tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) # is_dataframe and ignore_index - df1 = pd.DataFrame({"A": [1, 2], "B": [5, 6]}) - df2 = pd.DataFrame({"A": [3, 4], "B": [7, 8]}) + df1 = DataFrame({"A": [1, 2], "B": [5, 6]}) + df2 = DataFrame({"A": [3, 4], "B": [7, 8]}) res = pd.concat([df1, df2], axis=0, ignore_index=True) - exp = pd.DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"]) + exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"]) tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) res = pd.concat([df1, df2], axis=1, ignore_index=True) - exp = pd.DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]]) + exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]]) tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) def test_concat_multiindex_rangeindex(self): @@ -2322,10 +2315,10 @@ def test_concat_multiindex_dfs_with_deepcopy(self): from copy import deepcopy example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]]) - example_dataframe1 = pd.DataFrame([0], index=example_multiindex1) + example_dataframe1 = DataFrame([0], index=example_multiindex1) example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]]) - example_dataframe2 = 
pd.DataFrame([1], index=example_multiindex2) + example_dataframe2 = DataFrame([1], index=example_multiindex2) example_dict = {"s1": example_dataframe1, "s2": example_dataframe2} expected_index = pd.MultiIndex( @@ -2333,7 +2326,7 @@ def test_concat_multiindex_dfs_with_deepcopy(self): codes=[[0, 1], [0, 0], [0, 1]], names=["testname", None, None], ) - expected = pd.DataFrame([[0], [1]], index=expected_index) + expected = DataFrame([[0], [1]], index=expected_index) result_copy = pd.concat(deepcopy(example_dict), names=["testname"]) tm.assert_frame_equal(result_copy, expected) result_no_copy = pd.concat(example_dict, names=["testname"]) @@ -2506,7 +2499,7 @@ def test_concat_categoricalindex(self): result = pd.concat([a, b, c], axis=1) exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories) - exp = pd.DataFrame( + exp = DataFrame( { 0: [1, 1, np.nan, np.nan], 1: [np.nan, 2, 2, np.nan], @@ -2519,10 +2512,8 @@ def test_concat_categoricalindex(self): def test_concat_order(self): # GH 17344 - dfs = [pd.DataFrame(index=range(3), columns=["a", 1, None])] - dfs += [ - pd.DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100) - ] + dfs = [DataFrame(index=range(3), columns=["a", 1, None])] + dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)] result = pd.concat(dfs, sort=True).columns expected = dfs[0].columns @@ -2532,8 +2523,8 @@ def test_concat_datetime_timezone(self): # GH 18523 idx1 = pd.date_range("2011-01-01", periods=3, freq="H", tz="Europe/Paris") idx2 = pd.date_range(start=idx1[0], end=idx1[-1], freq="H") - df1 = pd.DataFrame({"a": [1, 2, 3]}, index=idx1) - df2 = pd.DataFrame({"b": [1, 2, 3]}, index=idx2) + df1 = DataFrame({"a": [1, 2, 3]}, index=idx1) + df2 = DataFrame({"b": [1, 2, 3]}, index=idx2) result = pd.concat([df1, df2], axis=1) exp_idx = ( @@ -2549,14 +2540,14 @@ def test_concat_datetime_timezone(self): .tz_convert("Europe/Paris") ) - expected = pd.DataFrame( + expected = DataFrame( [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"] ) tm.assert_frame_equal(result, expected) idx3 = pd.date_range("2011-01-01", periods=3, freq="H", tz="Asia/Tokyo") - df3 = pd.DataFrame({"b": [1, 2, 3]}, index=idx3) + df3 = DataFrame({"b": [1, 2, 3]}, index=idx3) result = pd.concat([df1, df3], axis=1) exp_idx = DatetimeIndex( @@ -2570,7 +2561,7 @@ def test_concat_datetime_timezone(self): ] ) - expected = pd.DataFrame( + expected = DataFrame( [ [np.nan, 1], [np.nan, 2], @@ -2589,7 +2580,7 @@ def test_concat_datetime_timezone(self): result = pd.concat( [df1.resample("H").mean(), df2.resample("H").mean()], sort=True ) - expected = pd.DataFrame( + expected = DataFrame( {"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]}, index=idx1.append(idx1), ) @@ -2645,9 +2636,9 @@ def test_concat_will_upcast(dt, pdt): def test_concat_empty_and_non_empty_frame_regression(): # GH 18178 regression test - df1 = pd.DataFrame({"foo": [1]}) - df2 = pd.DataFrame({"foo": []}) - expected = pd.DataFrame({"foo": [1.0]}) + df1 = DataFrame({"foo": [1]}) + df2 = DataFrame({"foo": []}) + expected = DataFrame({"foo": [1.0]}) result = pd.concat([df1, df2]) tm.assert_frame_equal(result, expected) @@ -2664,11 +2655,11 @@ def test_concat_empty_and_non_empty_series_regression(): def test_concat_sorts_columns(sort): # GH-4588 - df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) - df2 = pd.DataFrame({"a": [3, 4], "c": [5, 6]}) + df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) + df2 = DataFrame({"a": [3, 4], "c": [5, 6]}) # for 
sort=True/None - expected = pd.DataFrame( + expected = DataFrame( {"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]}, columns=["a", "b", "c"], ) @@ -2683,11 +2674,11 @@ def test_concat_sorts_columns(sort): def test_concat_sorts_index(sort): - df1 = pd.DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"]) - df2 = pd.DataFrame({"b": [1, 2]}, index=["a", "b"]) + df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"]) + df2 = DataFrame({"b": [1, 2]}, index=["a", "b"]) # For True/None - expected = pd.DataFrame( + expected = DataFrame( {"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"] ) if sort is False: @@ -2701,15 +2692,15 @@ def test_concat_sorts_index(sort): def test_concat_inner_sort(sort): # https://github.com/pandas-dev/pandas/pull/20613 - df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"]) - df2 = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4]) + df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"]) + df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4]) with tm.assert_produces_warning(None): # unset sort should *not* warn for inner join # since that never sorted result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True) - expected = pd.DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"]) + expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"]) if sort is True: expected = expected[["a", "b"]] tm.assert_frame_equal(result, expected) @@ -2717,9 +2708,9 @@ def test_concat_inner_sort(sort): def test_concat_aligned_sort(): # GH-4588 - df = pd.DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"]) + df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"]) result = pd.concat([df, df], sort=True, ignore_index=True) - expected = pd.DataFrame( + expected = DataFrame( {"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]}, columns=["a", "b", "c"], ) @@ -2733,8 +2724,8 @@ def test_concat_aligned_sort(): def test_concat_aligned_sort_does_not_raise(): # GH-4588 # We catch TypeErrors from sorting internally and do not re-raise. 
- df = pd.DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"]) - expected = pd.DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"]) + df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"]) + expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"]) result = pd.concat([df, df], ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) @@ -2769,10 +2760,10 @@ def test_concat_categorical_unchanged(): # GH-12007 # test fix for when concat on categorical and float # coerces dtype categorical -> float - df = pd.DataFrame(Series(["a", "b", "c"], dtype="category", name="A")) + df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A")) ser = Series([0, 1, 2], index=[0, 1, 3], name="B") result = pd.concat([df, ser], axis=1) - expected = pd.DataFrame( + expected = DataFrame( { "A": Series(["a", "b", "c", np.nan], dtype="category"), "B": Series([0, 1, np.nan, 2], dtype="float"), @@ -2786,21 +2777,21 @@ def test_concat_datetimeindex_freq(): # Monotonic index result dr = pd.date_range("01-Jan-2013", periods=100, freq="50L", tz="UTC") data = list(range(100)) - expected = pd.DataFrame(data, index=dr) + expected = DataFrame(data, index=dr) result = pd.concat([expected[:50], expected[50:]]) tm.assert_frame_equal(result, expected) # Non-monotonic index result result = pd.concat([expected[50:], expected[:50]]) - expected = pd.DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50])) + expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50])) expected.index._data.freq = None tm.assert_frame_equal(result, expected) def test_concat_empty_df_object_dtype(): # GH 9149 - df_1 = pd.DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]}) - df_2 = pd.DataFrame(columns=df_1.columns) + df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]}) + df_2 = DataFrame(columns=df_1.columns) result = pd.concat([df_1, df_2], axis=0) expected = df_1.astype(object) tm.assert_frame_equal(result, expected) @@ -2809,7 +2800,7 @@ def test_concat_empty_df_object_dtype(): def test_concat_sparse(): # GH 23557 a = Series(SparseArray([0, 1, 2])) - expected = pd.DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype( + expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype( pd.SparseDtype(np.int64, 0) ) result = pd.concat([a, a], axis=1) @@ -2906,24 +2897,24 @@ def test_concat_preserves_subclass(obj): def test_concat_frame_axis0_extension_dtypes(): # preserve extension dtype (through common_dtype mechanism) - df1 = pd.DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")}) - df2 = pd.DataFrame({"a": np.array([4, 5, 6])}) + df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")}) + df2 = DataFrame({"a": np.array([4, 5, 6])}) result = pd.concat([df1, df2], ignore_index=True) - expected = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64") + expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64") tm.assert_frame_equal(result, expected) result = pd.concat([df2, df1], ignore_index=True) - expected = pd.DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") + expected = DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") tm.assert_frame_equal(result, expected) def test_concat_preserves_extension_int64_dtype(): # GH 24768 - df_a = pd.DataFrame({"a": [-1]}, dtype="Int64") - df_b = pd.DataFrame({"b": [1]}, dtype="Int64") + df_a = DataFrame({"a": [-1]}, dtype="Int64") + df_b = DataFrame({"b": [1]}, dtype="Int64") result = pd.concat([df_a, df_b], ignore_index=True) - expected = pd.DataFrame({"a": [-1, None], "b": [None, 1]}, 
dtype="Int64") + expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64") tm.assert_frame_equal(result, expected) @@ -3111,20 +3102,20 @@ def test_concat_tz_NaT(self, t1): def test_concat_tz_not_aligned(self): # GH#22796 ts = pd.to_datetime([1, 2]).tz_localize("UTC") - a = pd.DataFrame({"A": ts}) - b = pd.DataFrame({"A": ts, "B": ts}) + a = DataFrame({"A": ts}) + b = DataFrame({"A": ts, "B": ts}) result = pd.concat([a, b], sort=True, ignore_index=True) - expected = pd.DataFrame( + expected = DataFrame( {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)} ) tm.assert_frame_equal(result, expected) def test_concat_tuple_keys(self): # GH#14438 - df1 = pd.DataFrame(np.ones((2, 2)), columns=list("AB")) - df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list("AB")) + df1 = DataFrame(np.ones((2, 2)), columns=list("AB")) + df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB")) results = pd.concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")]) - expected = pd.DataFrame( + expected = DataFrame( { "A": { ("bee", "bah", 0): 1.0, @@ -3146,10 +3137,10 @@ def test_concat_tuple_keys(self): def test_concat_named_keys(self): # GH#14252 - df = pd.DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]}) + df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]}) index = Index(["a", "b"], name="baz") concatted_named_from_keys = pd.concat([df, df], keys=index) - expected_named = pd.DataFrame( + expected_named = DataFrame( {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]}, index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]), ) @@ -3162,7 +3153,7 @@ def test_concat_named_keys(self): tm.assert_frame_equal(concatted_named_from_names, expected_named) concatted_unnamed = pd.concat([df, df], keys=index_no_name) - expected_unnamed = pd.DataFrame( + expected_unnamed = DataFrame( {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]}, index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]), ) @@ -3170,11 +3161,11 @@ def test_concat_named_keys(self): def test_concat_axis_parameter(self): # GH#14369 - df1 = pd.DataFrame({"A": [0.1, 0.2]}, index=range(2)) - df2 = pd.DataFrame({"A": [0.3, 0.4]}, index=range(2)) + df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2)) + df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2)) # Index/row/0 DataFrame - expected_index = pd.DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1]) + expected_index = DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1]) concatted_index = pd.concat([df1, df2], axis="index") tm.assert_frame_equal(concatted_index, expected_index) @@ -3186,7 +3177,7 @@ def test_concat_axis_parameter(self): tm.assert_frame_equal(concatted_0, expected_index) # Columns/1 DataFrame - expected_columns = pd.DataFrame( + expected_columns = DataFrame( [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"] ) @@ -3212,7 +3203,7 @@ def test_concat_axis_parameter(self): tm.assert_series_equal(concatted_0_series, expected_index_series) # Columns/1 Series - expected_columns_series = pd.DataFrame( + expected_columns_series = DataFrame( [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1] ) @@ -3228,7 +3219,7 @@ def test_concat_axis_parameter(self): def test_concat_numerical_names(self): # GH#15262, GH#12223 - df = pd.DataFrame( + df = DataFrame( {"col": range(9)}, dtype="int32", index=( @@ -3238,7 +3229,7 @@ def test_concat_numerical_names(self): ), ) result = pd.concat((df.iloc[:2, :], df.iloc[-2:, :])) - expected = pd.DataFrame( + expected = DataFrame( {"col": [0, 1, 7, 8]}, dtype="int32", index=pd.MultiIndex.from_tuples( @@ 
-3249,11 +3240,11 @@ def test_concat_numerical_names(self): def test_concat_astype_dup_col(self): # GH#23049 - df = pd.DataFrame([{"a": "b"}]) + df = DataFrame([{"a": "b"}]) df = pd.concat([df, df], axis=1) result = df.astype("category") - expected = pd.DataFrame( + expected = DataFrame( np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"] ).astype("category") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 79879ef346f53..99beff39e8e09 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -15,7 +15,7 @@ def setup_method(self, method): self.var_name = "var" self.value_name = "val" - self.df1 = pd.DataFrame( + self.df1 = DataFrame( [ [1.067683, -1.110463, 0.20867], [-1.321405, 0.368915, -1.055342], @@ -310,7 +310,7 @@ def test_melt_missing_columns_raises(self): # attempted with column names absent from the dataframe # Generate data - df = pd.DataFrame(np.random.randn(5, 4), columns=list("abcd")) + df = DataFrame(np.random.randn(5, 4), columns=list("abcd")) # Try to melt with missing `value_vars` column name msg = "The following '{Var}' are not present in the DataFrame: {Col}" @@ -634,7 +634,7 @@ class TestWideToLong: def test_simple(self): np.random.seed(123) x = np.random.randn(3) - df = pd.DataFrame( + df = DataFrame( { "A1970": {0: "a", 1: "b", 2: "c"}, "A1980": {0: "d", 1: "e", 2: "f"}, @@ -658,7 +658,7 @@ def test_simple(self): def test_stubs(self): # GH9204 - df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]]) + df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]]) df.columns = ["id", "inc1", "inc2", "edu1", "edu2"] stubs = ["inc", "edu"] @@ -671,7 +671,7 @@ def test_separating_character(self): # GH14779 np.random.seed(123) x = np.random.randn(3) - df = pd.DataFrame( + df = DataFrame( { "A.1970": {0: "a", 1: "b", 2: "c"}, "A.1980": {0: "d", 1: "e", 2: "f"}, @@ -696,7 +696,7 @@ def test_separating_character(self): def test_escapable_characters(self): np.random.seed(123) x = np.random.randn(3) - df = pd.DataFrame( + df = DataFrame( { "A(quarterly)1970": {0: "a", 1: "b", 2: "c"}, "A(quarterly)1980": {0: "d", 1: "e", 2: "f"}, @@ -722,7 +722,7 @@ def test_escapable_characters(self): def test_unbalanced(self): # test that we can have a varying amount of time variables - df = pd.DataFrame( + df = DataFrame( { "A2010": [1.0, 2.0], "A2011": [3.0, 4.0], @@ -738,14 +738,14 @@ def test_unbalanced(self): "id": [0, 0, 1, 1], "year": [2010, 2011, 2010, 2011], } - expected = pd.DataFrame(exp_data) + expected = DataFrame(exp_data) expected = expected.set_index(["id", "year"])[["X", "A", "B"]] result = wide_to_long(df, ["A", "B"], i="id", j="year") tm.assert_frame_equal(result, expected) def test_character_overlap(self): # Test we handle overlapping characters in both id_vars and value_vars - df = pd.DataFrame( + df = DataFrame( { "A11": ["a11", "a22", "a33"], "A12": ["a21", "a22", "a23"], @@ -758,7 +758,7 @@ def test_character_overlap(self): } ) df["id"] = df.index - expected = pd.DataFrame( + expected = DataFrame( { "BBBX": [91, 92, 93, 91, 92, 93], "BBBZ": [91, 92, 93, 91, 92, 93], @@ -776,7 +776,7 @@ def test_character_overlap(self): def test_invalid_separator(self): # if an invalid separator is supplied a empty data frame is returned sep = "nope!" 
- df = pd.DataFrame( + df = DataFrame( { "A2010": [1.0, 2.0], "A2011": [3.0, 4.0], @@ -795,7 +795,7 @@ def test_invalid_separator(self): "A": [], "B": [], } - expected = pd.DataFrame(exp_data).astype({"year": "int"}) + expected = DataFrame(exp_data).astype({"year": "int"}) expected = expected.set_index(["id", "year"])[ ["X", "A2010", "A2011", "B2010", "A", "B"] ] @@ -806,7 +806,7 @@ def test_invalid_separator(self): def test_num_string_disambiguation(self): # Test that we can disambiguate number value_vars from # string value_vars - df = pd.DataFrame( + df = DataFrame( { "A11": ["a11", "a22", "a33"], "A12": ["a21", "a22", "a23"], @@ -819,7 +819,7 @@ def test_num_string_disambiguation(self): } ) df["id"] = df.index - expected = pd.DataFrame( + expected = DataFrame( { "Arating": [91, 92, 93, 91, 92, 93], "Arating_old": [91, 92, 93, 91, 92, 93], @@ -839,7 +839,7 @@ def test_num_string_disambiguation(self): def test_invalid_suffixtype(self): # If all stubs names end with a string, but a numeric suffix is # assumed, an empty data frame is returned - df = pd.DataFrame( + df = DataFrame( { "Aone": [1.0, 2.0], "Atwo": [3.0, 4.0], @@ -858,7 +858,7 @@ def test_invalid_suffixtype(self): "A": [], "B": [], } - expected = pd.DataFrame(exp_data).astype({"year": "int"}) + expected = DataFrame(exp_data).astype({"year": "int"}) expected = expected.set_index(["id", "year"]) expected.index = expected.index.set_levels([0, 1], level=0) @@ -867,7 +867,7 @@ def test_invalid_suffixtype(self): def test_multiple_id_columns(self): # Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm - df = pd.DataFrame( + df = DataFrame( { "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3], "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3], @@ -875,7 +875,7 @@ def test_multiple_id_columns(self): "ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9], } ) - expected = pd.DataFrame( + expected = DataFrame( { "ht": [ 2.8, @@ -909,7 +909,7 @@ def test_multiple_id_columns(self): def test_non_unique_idvars(self): # GH16382 # Raise an error message if non unique id vars (i) are passed - df = pd.DataFrame( + df = DataFrame( {"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]} ) msg = "the id variables need to uniquely identify each row" @@ -917,7 +917,7 @@ def test_non_unique_idvars(self): wide_to_long(df, ["A_A", "B_B"], i="x", j="colname") def test_cast_j_int(self): - df = pd.DataFrame( + df = DataFrame( { "actor_1": ["CCH Pounder", "Johnny Depp", "Christoph Waltz"], "actor_2": ["Joel David Moore", "Orlando Bloom", "Rory Kinnear"], @@ -927,7 +927,7 @@ def test_cast_j_int(self): } ) - expected = pd.DataFrame( + expected = DataFrame( { "actor": [ "CCH Pounder", @@ -956,7 +956,7 @@ def test_cast_j_int(self): tm.assert_frame_equal(result, expected) def test_identical_stubnames(self): - df = pd.DataFrame( + df = DataFrame( { "A2010": [1.0, 2.0], "A2011": [3.0, 4.0], @@ -969,7 +969,7 @@ def test_identical_stubnames(self): wide_to_long(df, ["A", "B"], i="A", j="colname") def test_nonnumeric_suffix(self): - df = pd.DataFrame( + df = DataFrame( { "treatment_placebo": [1.0, 2.0], "treatment_test": [3.0, 4.0], @@ -977,7 +977,7 @@ def test_nonnumeric_suffix(self): "A": ["X1", "X2"], } ) - expected = pd.DataFrame( + expected = DataFrame( { "A": ["X1", "X1", "X2", "X2"], "colname": ["placebo", "test", "placebo", "test"], @@ -992,7 +992,7 @@ def test_nonnumeric_suffix(self): tm.assert_frame_equal(result, expected) def test_mixed_type_suffix(self): - df = pd.DataFrame( + df = DataFrame( { "A": ["X1", "X2"], "result_1": [0, 9], @@ -1001,7 +1001,7 @@ 
def test_mixed_type_suffix(self): "treatment_foo": [3.0, 4.0], } ) - expected = pd.DataFrame( + expected = DataFrame( { "A": ["X1", "X2", "X1", "X2"], "colname": ["1", "1", "foo", "foo"], @@ -1015,7 +1015,7 @@ def test_mixed_type_suffix(self): tm.assert_frame_equal(result, expected) def test_float_suffix(self): - df = pd.DataFrame( + df = DataFrame( { "treatment_1.1": [1.0, 2.0], "treatment_2.1": [3.0, 4.0], @@ -1024,7 +1024,7 @@ def test_float_suffix(self): "A": ["X1", "X2"], } ) - expected = pd.DataFrame( + expected = DataFrame( { "A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"], "colname": [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1], @@ -1060,8 +1060,8 @@ def test_warn_of_column_name_value(self): # GH34731 # raise a warning if the resultant value column name matches # a name in the dataframe already (default name is "value") - df = pd.DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) - expected = pd.DataFrame( + df = DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) + expected = DataFrame( [["A", "col", "A"], ["B", "col", "B"], ["C", "col", "C"]], columns=["value", "variable", "value"], ) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 943a7d0a3cf86..cfe969b5f61bb 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -110,7 +110,7 @@ def test_pivot_table(self, observed): def test_pivot_table_categorical_observed_equal(self, observed): # issue #24923 - df = pd.DataFrame( + df = DataFrame( {"col1": list("abcde"), "col2": list("fghij"), "col3": [1, 2, 3, 4, 5]} ) @@ -229,7 +229,7 @@ def test_pivot_table_dropna_categoricals(self, dropna): def test_pivot_with_non_observable_dropna(self, dropna): # gh-21133 - df = pd.DataFrame( + df = DataFrame( { "A": pd.Categorical( [np.nan, "low", "high", "low", "high"], @@ -241,7 +241,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): ) result = df.pivot_table(index="A", values="B", dropna=dropna) - expected = pd.DataFrame( + expected = DataFrame( {"B": [2, 3]}, index=pd.Index( pd.Categorical.from_codes( @@ -254,7 +254,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): tm.assert_frame_equal(result, expected) # gh-21378 - df = pd.DataFrame( + df = DataFrame( { "A": pd.Categorical( ["left", "low", "high", "low", "high"], @@ -266,7 +266,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): ) result = df.pivot_table(index="A", values="B", dropna=dropna) - expected = pd.DataFrame( + expected = DataFrame( {"B": [2, 3, 0]}, index=pd.Index( pd.Categorical.from_codes( @@ -395,16 +395,14 @@ def test_pivot_no_values(self): idx = pd.DatetimeIndex( ["2011-01-01", "2011-02-01", "2011-01-02", "2011-01-01", "2011-01-02"] ) - df = pd.DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx) + df = DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx) res = df.pivot_table(index=df.index.month, columns=df.index.day) exp_columns = pd.MultiIndex.from_tuples([("A", 1), ("A", 2)]) - exp = pd.DataFrame( - [[2.5, 4.0], [2.0, np.nan]], index=[1, 2], columns=exp_columns - ) + exp = DataFrame([[2.5, 4.0], [2.0, np.nan]], index=[1, 2], columns=exp_columns) tm.assert_frame_equal(res, exp) - df = pd.DataFrame( + df = DataFrame( { "A": [1, 2, 3, 4, 5], "dt": pd.date_range("2011-01-01", freq="D", periods=5), @@ -416,13 +414,13 @@ def test_pivot_no_values(self): ) exp_columns = pd.MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))]) exp_columns.names = [None, "dt"] - exp = pd.DataFrame([3.25, 2.0], index=[1, 2], columns=exp_columns) + exp = DataFrame([3.25, 2.0], index=[1, 2], 
columns=exp_columns) tm.assert_frame_equal(res, exp) res = df.pivot_table( index=pd.Grouper(freq="A"), columns=pd.Grouper(key="dt", freq="M") ) - exp = pd.DataFrame( + exp = DataFrame( [3], index=pd.DatetimeIndex(["2011-12-31"], freq="A"), columns=exp_columns ) tm.assert_frame_equal(res, exp) @@ -577,7 +575,7 @@ def test_pivot_with_tz(self, method): def test_pivot_tz_in_values(self): # GH 14948 - df = pd.DataFrame( + df = DataFrame( [ { "uid": "aa", @@ -612,7 +610,7 @@ def test_pivot_tz_in_values(self): columns=[mins], aggfunc=np.min, ) - expected = pd.DataFrame( + expected = DataFrame( [ [ pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"), @@ -714,7 +712,7 @@ def test_pivot_periods_with_margins(self): @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_list_like_values(self, values, method): # issue #17160 - df = pd.DataFrame( + df = DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], @@ -750,7 +748,7 @@ def test_pivot_with_list_like_values(self, values, method): @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_list_like_values_nans(self, values, method): # issue #17160 - df = pd.DataFrame( + df = DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], @@ -783,9 +781,7 @@ def test_pivot_with_list_like_values_nans(self, values, method): def test_pivot_columns_none_raise_error(self): # GH 30924 - df = pd.DataFrame( - {"col1": ["a", "b", "c"], "col2": [1, 2, 3], "col3": [1, 2, 3]} - ) + df = DataFrame({"col1": ["a", "b", "c"], "col2": [1, 2, 3], "col3": [1, 2, 3]}) msg = r"pivot\(\) missing 1 required argument: 'columns'" with pytest.raises(TypeError, match=msg): df.pivot(index="col1", values="col3") @@ -835,7 +831,7 @@ def test_pivot_with_multiindex(self, method): @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_tuple_of_values(self, method): # issue #17160 - df = pd.DataFrame( + df = DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], @@ -941,7 +937,7 @@ def test_margin_with_only_columns_defined( self, columns, aggfunc, values, expected_columns ): # GH 31016 - df = pd.DataFrame( + df = DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], @@ -962,9 +958,7 @@ def test_margin_with_only_columns_defined( ) result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc) - expected = pd.DataFrame( - values, index=Index(["D", "E"]), columns=expected_columns - ) + expected = DataFrame(values, index=Index(["D", "E"]), columns=expected_columns) tm.assert_frame_equal(result, expected) @@ -1655,9 +1649,7 @@ def test_monthly(self): rng = date_range("1/1/2000", "12/31/2004", freq="M") ts = Series(np.random.randn(len(rng)), index=rng) - annual = pivot_table( - pd.DataFrame(ts), index=ts.index.year, columns=ts.index.month - ) + annual = pivot_table(DataFrame(ts), index=ts.index.year, columns=ts.index.month) annual.columns = annual.columns.droplevel(0) month = ts.index.month @@ -1690,7 +1682,7 @@ def test_pivot_table_with_iterator_values(self): def test_pivot_table_margins_name_with_aggfunc_list(self): # GH 13354 margins_name = "Weekly" - costs = pd.DataFrame( + costs = DataFrame( { "item": ["bacon", "cheese", "bacon", "cheese"], "cost": [2.5, 4.5, 3.2, 3.3], @@ -1714,17 +1706,17 @@ def test_pivot_table_margins_name_with_aggfunc_list(self): ("max", "cost", margins_name), ] cols = 
pd.MultiIndex.from_tuples(tups, names=[None, None, "day"]) - expected = pd.DataFrame(table.values, index=ix, columns=cols) + expected = DataFrame(table.values, index=ix, columns=cols) tm.assert_frame_equal(table, expected) @pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to ints)") def test_categorical_margins(self, observed): # GH 10989 - df = pd.DataFrame( + df = DataFrame( {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2} ) - expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) + expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) expected.index = Index([0, 1, "All"], name="y") expected.columns = Index([0, 1, "All"], name="z") @@ -1733,11 +1725,11 @@ def test_categorical_margins(self, observed): @pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to ints)") def test_categorical_margins_category(self, observed): - df = pd.DataFrame( + df = DataFrame( {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2} ) - expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) + expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) expected.index = Index([0, 1, "All"], name="y") expected.columns = Index([0, 1, "All"], name="z") @@ -1748,7 +1740,7 @@ def test_categorical_margins_category(self, observed): def test_margins_casted_to_float(self, observed): # GH 24893 - df = pd.DataFrame( + df = DataFrame( { "A": [2, 4, 6, 8], "B": [1, 4, 5, 8], @@ -1758,7 +1750,7 @@ def test_margins_casted_to_float(self, observed): ) result = pd.pivot_table(df, index="D", margins=True) - expected = pd.DataFrame( + expected = DataFrame( {"A": [3, 7, 5], "B": [2.5, 6.5, 4.5], "C": [2, 5, 3.5]}, index=pd.Index(["X", "Y", "All"], name="D"), ) @@ -1768,7 +1760,7 @@ def test_pivot_with_categorical(self, observed, ordered): # gh-21370 idx = [np.nan, "low", "high", "low", np.nan] col = [np.nan, "A", "B", np.nan, "A"] - df = pd.DataFrame( + df = DataFrame( { "In": pd.Categorical(idx, categories=["low", "high"], ordered=ordered), "Col": pd.Categorical(col, categories=["A", "B"], ordered=ordered), @@ -1782,9 +1774,7 @@ def test_pivot_with_categorical(self, observed, ordered): expected_cols = pd.CategoricalIndex(["A", "B"], ordered=ordered, name="Col") - expected = pd.DataFrame( - data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols - ) + expected = DataFrame(data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols) expected.index = Index( pd.Categorical( ["low", "high"], categories=["low", "high"], ordered=ordered @@ -1797,7 +1787,7 @@ def test_pivot_with_categorical(self, observed, ordered): # case with columns/value result = df.pivot_table(columns="Col", values="Val", observed=observed) - expected = pd.DataFrame( + expected = DataFrame( data=[[3.5, 3.0]], columns=expected_cols, index=Index(["Val"]) ) @@ -1805,7 +1795,7 @@ def test_pivot_with_categorical(self, observed, ordered): def test_categorical_aggfunc(self, observed): # GH 9534 - df = pd.DataFrame( + df = DataFrame( {"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]} ) df["C1"] = df["C1"].astype("category") @@ -1818,14 +1808,14 @@ def test_categorical_aggfunc(self, observed): ) expected_columns = pd.Index(["a", "b"], name="C2") expected_data = np.array([[1, 0], [1, 0], [0, 2]], dtype=np.int64) - expected = pd.DataFrame( + expected = DataFrame( expected_data, index=expected_index, columns=expected_columns ) tm.assert_frame_equal(result, expected) def test_categorical_pivot_index_ordering(self, observed): # GH 8731 - df = 
pd.DataFrame( + df = DataFrame( { "Sales": [100, 120, 220], "Month": ["January", "January", "January"], @@ -1859,7 +1849,7 @@ def test_categorical_pivot_index_ordering(self, observed): months, categories=months, ordered=False, name="Month" ) expected_data = [[320, 120]] + [[0, 0]] * 11 - expected = pd.DataFrame( + expected = DataFrame( expected_data, index=expected_index, columns=expected_columns ) if observed: @@ -1898,12 +1888,12 @@ def test_pivot_table_not_series(self): def test_pivot_margins_name_unicode(self): # issue #13292 greek = "\u0394\u03bf\u03ba\u03b9\u03bc\u03ae" - frame = pd.DataFrame({"foo": [1, 2, 3]}) + frame = DataFrame({"foo": [1, 2, 3]}) table = pd.pivot_table( frame, index=["foo"], aggfunc=len, margins=True, margins_name=greek ) index = pd.Index([1, 2, 3, greek], dtype="object", name="foo") - expected = pd.DataFrame(index=index) + expected = DataFrame(index=index) tm.assert_frame_equal(table, expected) def test_pivot_string_as_func(self): @@ -2001,7 +1991,7 @@ def test_pivot_number_of_levels_larger_than_int32(self): def test_pivot_table_aggfunc_dropna(self, dropna): # GH 22159 - df = pd.DataFrame( + df = DataFrame( { "fruit": ["apple", "peach", "apple"], "size": [1, 1, 2], @@ -2027,7 +2017,7 @@ def ret_none(x): [["ret_sum", "ret_none", "ret_one"], ["apple", "peach"]], names=[None, "fruit"], ) - expected = pd.DataFrame(data, index=["size", "taste"], columns=col) + expected = DataFrame(data, index=["size", "taste"], columns=col) if dropna: expected = expected.dropna(axis="columns") @@ -2036,7 +2026,7 @@ def ret_none(x): def test_pivot_table_aggfunc_scalar_dropna(self, dropna): # GH 22159 - df = pd.DataFrame( + df = DataFrame( {"A": ["one", "two", "one"], "x": [3, np.nan, 2], "y": [1, np.nan, np.nan]} ) @@ -2044,7 +2034,7 @@ def test_pivot_table_aggfunc_scalar_dropna(self, dropna): data = [[2.5, np.nan], [1, np.nan]] col = pd.Index(["one", "two"], name="A") - expected = pd.DataFrame(data, index=["x", "y"], columns=col) + expected = DataFrame(data, index=["x", "y"], columns=col) if dropna: expected = expected.dropna(axis="columns") @@ -2053,7 +2043,7 @@ def test_pivot_table_aggfunc_scalar_dropna(self, dropna): def test_pivot_table_empty_aggfunc(self): # GH 9186 - df = pd.DataFrame( + df = DataFrame( { "A": [2, 2, 3, 3, 2], "id": [5, 6, 7, 8, 9], @@ -2062,7 +2052,7 @@ def test_pivot_table_empty_aggfunc(self): } ) result = df.pivot_table(index="A", columns="D", values="id", aggfunc=np.size) - expected = pd.DataFrame() + expected = DataFrame() tm.assert_frame_equal(result, expected) def test_pivot_table_no_column_raises(self): @@ -2070,8 +2060,6 @@ def test_pivot_table_no_column_raises(self): def agg(l): return np.mean(l) - foo = pd.DataFrame( - {"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]} - ) + foo = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]}) with pytest.raises(KeyError, match="notpresent"): foo.pivot_table("notpresent", "X", "Y", aggfunc=agg) diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py index 61ebd2fcb3a27..2627e8b8608a9 100644 --- a/pandas/tests/series/apply/test_series_apply.py +++ b/pandas/tests/series/apply/test_series_apply.py @@ -156,7 +156,7 @@ def f(x): def test_apply_dict_depr(self): - tsdf = pd.DataFrame( + tsdf = DataFrame( np.random.randn(10, 3), columns=["A", "B", "C"], index=pd.date_range("1/1/2000", periods=10), @@ -566,7 +566,7 @@ def test_map_dict_with_tuple_keys(self): from being mapped properly. 
""" # GH 18496 - df = pd.DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]}) + df = DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]}) label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"} df["labels"] = df["a"].map(label_mappings) diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 2e3d67786afdc..392f352711210 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -109,7 +109,7 @@ def test_slicing_datetimes(): tm.assert_frame_equal(result, expected) # duplicates - df = pd.DataFrame( + df = DataFrame( np.arange(5.0, dtype="float64"), index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]], ) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 3d927a80a157c..8c53ed85a20b3 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -263,7 +263,7 @@ def test_setitem_ambiguous_keyerror(): def test_getitem_dataframe(): rng = list(range(10)) s = Series(10, index=rng) - df = pd.DataFrame(rng, index=rng) + df = DataFrame(rng, index=rng) msg = ( "Indexing a Series with DataFrame is not supported, " "use the appropriate DataFrame column" diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py index e1d0bced55d98..4c2bf4683d17d 100644 --- a/pandas/tests/series/methods/test_append.py +++ b/pandas/tests/series/methods/test_append.py @@ -63,7 +63,7 @@ def test_append_tuples(self): def test_append_dataframe_raises(self): # GH 31413 - df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}) + df = DataFrame({"A": [1, 2], "B": [3, 4]}) msg = "to_append should be a Series or list/tuple of Series, got DataFrame" with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py index d8099e84a324d..38955ea7f06c4 100644 --- a/pandas/tests/series/methods/test_unstack.py +++ b/pandas/tests/series/methods/test_unstack.py @@ -73,7 +73,7 @@ def test_unstack_tuplename_in_multiindex(): ser = Series(1, index=idx) result = ser.unstack(("A", "a")) - expected = pd.DataFrame( + expected = DataFrame( [[1, 1, 1], [1, 1, 1], [1, 1, 1]], columns=pd.MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]), index=pd.Index([1, 2, 3], name=("B", "b")), @@ -112,7 +112,7 @@ def test_unstack_mixed_type_name_in_multiindex( ser = Series(1, index=idx) result = ser.unstack(unstack_idx) - expected = pd.DataFrame( + expected = DataFrame( expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 491b3a62b7d73..1ca639e85d913 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -712,7 +712,7 @@ def test_constructor_datelike_coercion(self): wing1 = "2T15 4H19".split() wing2 = "416 4T20".split() mat = pd.to_datetime("2016-01-22 2019-09-07".split()) - df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly) + df = DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly) result = df.loc["3T19"] assert result.dtype == object diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py index df7ea46dc4f86..08bb24a01b088 100644 --- a/pandas/tests/series/test_logical_ops.py +++ b/pandas/tests/series/test_logical_ops.py @@ 
-449,7 +449,7 @@ def test_logical_ops_df_compat(self): tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp.to_frame()) tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp.to_frame()) - exp = pd.DataFrame({"x": [True, True, np.nan, np.nan]}, index=list("ABCD")) + exp = DataFrame({"x": [True, True, np.nan, np.nan]}, index=list("ABCD")) tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp_or1.to_frame()) tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp_or.to_frame()) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index c4cd12fcbdf3b..31c0e7f54d12b 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -82,7 +82,7 @@ def f(x): def test_asfreq_resample_set_correct_freq(self): # GH5613 # we test if .asfreq() and .resample() set the correct value for .freq - df = pd.DataFrame( + df = DataFrame( {"date": ["2012-01-01", "2012-01-02", "2012-01-03"], "col": [1, 2, 3]} ) df = df.set_index(pd.to_datetime(df.date)) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 9c29d3a062dfa..810d98fd5bb89 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -375,11 +375,11 @@ def test_unstack_partial( # https://github.com/pandas-dev/pandas/issues/19351 # make sure DataFrame.unstack() works when its run on a subset of the DataFrame # and the Index levels contain values that are not present in the subset - result = pd.DataFrame(result_rows, columns=result_columns).set_index( + result = DataFrame(result_rows, columns=result_columns).set_index( ["ix1", "ix2"] ) result = result.iloc[1:2].unstack("ix2") - expected = pd.DataFrame( + expected = DataFrame( [expected_row], columns=pd.MultiIndex.from_product( [result_columns[2:], [index_product]], names=[None, "ix2"] @@ -925,7 +925,7 @@ def test_stack_unstack_unordered_multiindex(self): [f"a{x}" for x in values], # a0, a1, .. 
] ) - df = pd.DataFrame(data.T, columns=["b", "a"]) + df = DataFrame(data.T, columns=["b", "a"]) df.columns.name = "first" second_level_dict = {"x": df} multi_level_df = pd.concat(second_level_dict, axis=1) @@ -1919,7 +1919,7 @@ def test_multilevel_index_loc_order(self, dim, keys, expected): # GH 22797 # Try to respect order of keys given for MultiIndex.loc kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]} - df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs) + df = DataFrame(np.arange(25).reshape(5, 5), **kwargs) exp_index = MultiIndex.from_arrays(expected) if dim == "index": res = df.loc[keys, :] diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index a070d45089f96..7ee4b86fb4049 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1418,7 +1418,7 @@ def test_dataframe_dtypes(self, cache): def test_dataframe_utc_true(self): # GH 23760 - df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) result = pd.to_datetime(df, utc=True) expected = Series( np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]") diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index 5174ff005b5fb..6111797d70268 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -265,14 +265,14 @@ def test_assert_frame_equal_interval_dtype_mismatch(): @pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype): # https://github.com/pandas-dev/pandas/issues/35715 - left = pd.DataFrame({"a": [1, 2, 3]}, dtype="Int64") - right = pd.DataFrame({"a": [1, 2, 3]}, dtype=right_dtype) + left = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + right = DataFrame({"a": [1, 2, 3]}, dtype=right_dtype) tm.assert_frame_equal(left, right, check_dtype=False) def test_allows_duplicate_labels(): - left = pd.DataFrame() - right = pd.DataFrame().set_flags(allows_duplicate_labels=False) + left = DataFrame() + right = DataFrame().set_flags(allows_duplicate_labels=False) tm.assert_frame_equal(left, left) tm.assert_frame_equal(right, right) tm.assert_frame_equal(left, right, check_flags=False) diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index f761b6b4ffd7a..cf618f7c828aa 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -300,19 +300,19 @@ def test_hash_with_tuple(): # GH#28969 array containing a tuple raises on call to arr.astype(str) # apparently a numpy bug github.com/numpy/numpy/issues/9441 - df = pd.DataFrame({"data": [tuple("1"), tuple("2")]}) + df = DataFrame({"data": [tuple("1"), tuple("2")]}) result = hash_pandas_object(df) expected = Series([10345501319357378243, 8331063931016360761], dtype=np.uint64) tm.assert_series_equal(result, expected) - df2 = pd.DataFrame({"data": [tuple([1]), tuple([2])]}) + df2 = DataFrame({"data": [tuple([1]), tuple([2])]}) result = hash_pandas_object(df2) expected = Series([9408946347443669104, 3278256261030523334], dtype=np.uint64) tm.assert_series_equal(result, expected) # require that the elements of such tuples are themselves hashable - df3 = pd.DataFrame({"data": [tuple([1, []]), tuple([2, {}])]}) + df3 = DataFrame({"data": [tuple([1, []]), tuple([2, {}])]}) with pytest.raises(TypeError, match="unhashable type: 'list'"): hash_pandas_object(df3) diff --git 
a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index eb14ecfba1f51..6e5d7b4df00e1 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -305,7 +305,7 @@ def test_preserve_metadata(): ) def test_multiple_agg_funcs(func, window_size, expected_vals): # GH 15072 - df = pd.DataFrame( + df = DataFrame( [ ["A", 10, 20], ["A", 20, 30], @@ -331,7 +331,7 @@ def test_multiple_agg_funcs(func, window_size, expected_vals): columns = pd.MultiIndex.from_tuples( [("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")] ) - expected = pd.DataFrame(expected_vals, index=index, columns=columns) + expected = DataFrame(expected_vals, index=index, columns=columns) result = window.agg(dict((("low", ["mean", "max"]), ("high", ["mean", "min"])))) diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index 3dc1974685226..183d2814920e4 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -235,7 +235,7 @@ def test_iter_expanding_series(ser, expected, min_periods): def test_center_deprecate_warning(): # GH 20647 - df = pd.DataFrame() + df = DataFrame() with tm.assert_produces_warning(FutureWarning): df.expanding(center=True) diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index fbdf8c775530a..101d65c885c9b 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -131,7 +131,7 @@ def test_rolling_apply(self, raw): def test_rolling_apply_mutability(self): # GH 14013 - df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) + df = DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) g = df.groupby("A") mi = pd.MultiIndex.from_tuples( @@ -140,7 +140,7 @@ def test_rolling_apply_mutability(self): mi.names = ["A", None] # Grouped column should not be a part of the output - expected = pd.DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi) + expected = DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi) result = g.rolling(window=2).sum() tm.assert_frame_equal(result, expected) @@ -221,7 +221,7 @@ def test_groupby_rolling(self, expected_value, raw_value): def foo(x): return int(isinstance(x, np.ndarray)) - df = pd.DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]}) + df = DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]}) result = df.groupby("id").value.rolling(1).apply(foo, raw=raw_value) expected = Series( [expected_value] * 3, @@ -250,9 +250,9 @@ def test_groupby_rolling_center_center(self): ) tm.assert_series_equal(result, expected) - df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)}) + df = DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)}) result = df.groupby("a").rolling(center=True, window=3).mean() - expected = pd.DataFrame( + expected = DataFrame( [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan], index=pd.MultiIndex.from_tuples( ( @@ -274,9 +274,9 @@ def test_groupby_rolling_center_center(self): ) tm.assert_frame_equal(result, expected) - df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)}) + df = DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)}) result = df.groupby("a").rolling(center=True, window=3).mean() - expected = pd.DataFrame( + expected = DataFrame( [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan], index=pd.MultiIndex.from_tuples( ( @@ -299,7 +299,7 @@ def test_groupby_rolling_center_center(self): def test_groupby_rolling_center_on(self): # GH 37141 - df = pd.DataFrame( + df = DataFrame( data={ "Date": 
pd.date_range("2020-01-01", "2020-01-10"), "gb": ["group_1"] * 6 + ["group_2"] * 4, @@ -335,7 +335,7 @@ def test_groupby_rolling_center_on(self): @pytest.mark.parametrize("min_periods", [5, 4, 3]) def test_groupby_rolling_center_min_periods(self, min_periods): # GH 36040 - df = pd.DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)}) + df = DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)}) window_size = 5 result = ( @@ -353,7 +353,7 @@ def test_groupby_rolling_center_min_periods(self, min_periods): grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans - expected = pd.DataFrame( + expected = DataFrame( {"group": ["A"] * 10 + ["B"] * 10, "data": grp_A_expected + grp_B_expected} ) @@ -396,7 +396,7 @@ def get_window_bounds( start[start < 0] = min_periods return start, end - df = pd.DataFrame( + df = DataFrame( {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5 ) result = ( @@ -409,7 +409,7 @@ def get_window_bounds( def test_groupby_rolling_subset_with_closed(self): # GH 35549 - df = pd.DataFrame( + df = DataFrame( { "column1": range(6), "column2": range(6), @@ -433,7 +433,7 @@ def test_groupby_rolling_subset_with_closed(self): def test_groupby_subset_rolling_subset_with_closed(self): # GH 35549 - df = pd.DataFrame( + df = DataFrame( { "column1": range(6), "column2": range(6), @@ -481,19 +481,19 @@ def test_groupby_rolling_index_changed(self, func): def test_groupby_rolling_empty_frame(self): # GH 36197 - expected = pd.DataFrame({"s1": []}) + expected = DataFrame({"s1": []}) result = expected.groupby("s1").rolling(window=1).sum() expected.index = pd.MultiIndex.from_tuples([], names=["s1", None]) tm.assert_frame_equal(result, expected) - expected = pd.DataFrame({"s1": [], "s2": []}) + expected = DataFrame({"s1": [], "s2": []}) result = expected.groupby(["s1", "s2"]).rolling(window=1).sum() expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None]) tm.assert_frame_equal(result, expected) def test_groupby_rolling_string_index(self): # GH: 36727 - df = pd.DataFrame( + df = DataFrame( [ ["A", "group_1", pd.Timestamp(2019, 1, 1, 9)], ["B", "group_1", pd.Timestamp(2019, 1, 2, 9)], @@ -508,7 +508,7 @@ def test_groupby_rolling_string_index(self): df["count_to_date"] = groups.cumcount() rolling_groups = groups.rolling("10d", on="eventTime") result = rolling_groups.apply(lambda df: df.shape[0]) - expected = pd.DataFrame( + expected = DataFrame( [ ["A", "group_1", pd.Timestamp(2019, 1, 1, 9), 1.0], ["B", "group_1", pd.Timestamp(2019, 1, 2, 9), 2.0], @@ -523,12 +523,12 @@ def test_groupby_rolling_string_index(self): def test_groupby_rolling_no_sort(self): # GH 36889 result = ( - pd.DataFrame({"foo": [2, 1], "bar": [2, 1]}) + DataFrame({"foo": [2, 1], "bar": [2, 1]}) .groupby("foo", sort=False) .rolling(1) .min() ) - expected = pd.DataFrame( + expected = DataFrame( np.array([[2.0, 2.0], [1.0, 1.0]]), columns=["foo", "bar"], index=pd.MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]), @@ -537,7 +537,7 @@ def test_groupby_rolling_no_sort(self): def test_groupby_rolling_count_closed_on(self): # GH 35869 - df = pd.DataFrame( + df = DataFrame( { "column1": range(6), "column2": range(6), @@ -573,11 +573,11 @@ def test_groupby_rolling_count_closed_on(self): ) def test_groupby_rolling_sem(self, func, kwargs): # GH: 26476 - df = pd.DataFrame( + df = DataFrame( [["a", 1], ["a", 2], ["b", 1], ["b", 2], ["b", 3]], columns=["a", "b"] ) result = getattr(df.groupby("a"), 
func)(**kwargs).sem() - expected = pd.DataFrame( + expected = DataFrame( {"a": [np.nan] * 5, "b": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]}, index=pd.MultiIndex.from_tuples( [("a", 0), ("a", 1), ("b", 2), ("b", 3), ("b", 4)], names=["a", None] @@ -590,7 +590,7 @@ def test_groupby_rolling_sem(self, func, kwargs): ) def test_groupby_rolling_nans_in_index(self, rollings, key): # GH: 34617 - df = pd.DataFrame( + df = DataFrame( { "a": pd.to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]), "b": [1, 2, 3], diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 312b30e4491a6..048f7b8287176 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -155,7 +155,7 @@ def test_closed_one_entry(func): @pytest.mark.parametrize("func", ["min", "max"]) def test_closed_one_entry_groupby(func): # GH24718 - ser = pd.DataFrame( + ser = DataFrame( data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3) ) result = getattr( @@ -355,14 +355,14 @@ def test_readonly_array(): def test_rolling_datetime(axis_frame, tz_naive_fixture): # GH-28192 tz = tz_naive_fixture - df = pd.DataFrame( + df = DataFrame( {i: [1] * 2 for i in pd.date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)} ) if axis_frame in [0, "index"]: result = df.T.rolling("2D", axis=axis_frame).sum().T else: result = df.rolling("2D", axis=axis_frame).sum() - expected = pd.DataFrame( + expected = DataFrame( { **{ i: [1.0] * 2 @@ -438,7 +438,7 @@ def test_rolling_window_as_string(): def test_min_periods1(): # GH#6795 - df = pd.DataFrame([0, 1, 2, 1, 0], columns=["a"]) + df = DataFrame([0, 1, 2, 1, 0], columns=["a"]) result = df["a"].rolling(3, center=True, min_periods=1).max() expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a") tm.assert_series_equal(result, expected) @@ -706,7 +706,7 @@ def scaled_sum(*args): @pytest.mark.parametrize("add", [0.0, 2.0]) def test_rolling_numerical_accuracy_kahan_mean(add): # GH: 36031 implementing kahan summation - df = pd.DataFrame( + df = DataFrame( {"A": [3002399751580331.0 + add, -0.0, -0.0]}, index=[ pd.Timestamp("19700101 09:00:00"), @@ -718,7 +718,7 @@ def test_rolling_numerical_accuracy_kahan_mean(add): df.resample("1s").ffill().rolling("3s", closed="left", min_periods=3).mean() ) dates = pd.date_range("19700101 09:00:00", periods=7, freq="S") - expected = pd.DataFrame( + expected = DataFrame( { "A": [ np.nan, @@ -737,7 +737,7 @@ def test_rolling_numerical_accuracy_kahan_mean(add): def test_rolling_numerical_accuracy_kahan_sum(): # GH: 13254 - df = pd.DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"]) + df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"]) result = df["x"].rolling(3).sum() expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x") tm.assert_series_equal(result, expected) @@ -750,7 +750,7 @@ def test_rolling_numerical_accuracy_jump(): ) data = np.random.rand(len(index)) - df = pd.DataFrame({"data": data}, index=index) + df = DataFrame({"data": data}, index=index) result = df.rolling("60s").mean() tm.assert_frame_equal(result, df[["data"]]) @@ -784,10 +784,10 @@ def test_rolling_numerical_too_large_numbers(): ) def test_rolling_mixed_dtypes_axis_1(func, value): # GH: 20649 - df = pd.DataFrame(1, index=[1, 2], columns=["a", "b", "c"]) + df = DataFrame(1, index=[1, 2], columns=["a", "b", "c"]) df["c"] = 1.0 result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)() - expected = pd.DataFrame( + expected = DataFrame( {"a": [1.0, 1.0], "b": [value, 
value], "c": [value, value]}, index=[1, 2] ) tm.assert_frame_equal(result, expected) @@ -795,7 +795,7 @@ def test_rolling_mixed_dtypes_axis_1(func, value): def test_rolling_axis_one_with_nan(): # GH: 35596 - df = pd.DataFrame( + df = DataFrame( [ [0, 1, 2, 4, np.nan, np.nan, np.nan], [0, 1, 2, np.nan, np.nan, np.nan, np.nan], @@ -803,7 +803,7 @@ def test_rolling_axis_one_with_nan(): ] ) result = df.rolling(window=7, min_periods=1, axis="columns").sum() - expected = pd.DataFrame( + expected = DataFrame( [ [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0], [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0], @@ -819,17 +819,17 @@ def test_rolling_axis_one_with_nan(): ) def test_rolling_axis_1_non_numeric_dtypes(value): # GH: 20649 - df = pd.DataFrame({"a": [1, 2]}) + df = DataFrame({"a": [1, 2]}) df["b"] = value result = df.rolling(window=2, min_periods=1, axis=1).sum() - expected = pd.DataFrame({"a": [1.0, 2.0]}) + expected = DataFrame({"a": [1.0, 2.0]}) tm.assert_frame_equal(result, expected) def test_rolling_on_df_transposed(): # GH: 32724 - df = pd.DataFrame({"A": [1, None], "B": [4, 5], "C": [7, 8]}) - expected = pd.DataFrame({"A": [1.0, np.nan], "B": [5.0, 5.0], "C": [11.0, 13.0]}) + df = DataFrame({"A": [1, None], "B": [4, 5], "C": [7, 8]}) + expected = DataFrame({"A": [1.0, np.nan], "B": [5.0, 5.0], "C": [11.0, 13.0]}) result = df.rolling(min_periods=1, window=2, axis=1).sum() tm.assert_frame_equal(result, expected)
Follow-up to https://github.com/pandas-dev/pandas/pull/37188
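For reference, a minimal sketch of the convention this cleanup applies (assuming the usual `from pandas import DataFrame` at the top of each test module): the two spellings name the same class, so the shorter one is preferred once the direct import exists.

```python
# Both spellings resolve to the same class; test modules that already do
# `from pandas import DataFrame` can drop the redundant pd. prefix.
import pandas as pd
from pandas import DataFrame

assert DataFrame is pd.DataFrame
df = DataFrame({"a": [1, 2]})  # equivalent to pd.DataFrame({"a": [1, 2]})
```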
https://api.github.com/repos/pandas-dev/pandas/pulls/37298
2020-10-21T01:23:52Z
2020-10-21T12:31:09Z
2020-10-21T12:31:09Z
2020-10-22T08:19:42Z
CLN: _almost_ always rebox_native following _unbox
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index de0a246861961..f2a0173c0d593 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -508,7 +508,8 @@ def _validate_shift_value(self, fill_value): ) fill_value = new_fill - return self._unbox(fill_value) + rv = self._unbox(fill_value) + return self._rebox_native(rv) def _validate_scalar(self, value, msg: Optional[str] = None): """ @@ -603,18 +604,15 @@ def _validate_setitem_value(self, value): else: value = self._validate_scalar(value, msg) - return self._unbox(value, setitem=True) + rv = self._unbox(value, setitem=True) + return self._rebox_native(rv) def _validate_insert_value(self, value): msg = f"cannot insert {type(self).__name__} with incompatible label" value = self._validate_scalar(value, msg) - self._check_compatible_with(value, setitem=True) - # TODO: if we dont have compat, should we raise or astype(object)? - # PeriodIndex does astype(object) - return value - # Note: we do not unbox here because the caller needs boxed value - # to check for freq. + rv = self._unbox(value, setitem=True) + return self._rebox_native(rv) def _validate_where_value(self, other): msg = f"Where requires matching dtype, not {type(other)}" @@ -623,7 +621,8 @@ def _validate_where_value(self, other): else: other = self._validate_listlike(other) - return self._unbox(other, setitem=True) + rv = self._unbox(other, setitem=True) + return self._rebox_native(rv) def _unbox(self, other, setitem: bool = False) -> Union[np.int64, np.ndarray]: """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b6836a0bbe496..f67d1ec0aa65d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -484,7 +484,7 @@ def isin(self, values, level=None): @Appender(Index.where.__doc__) def where(self, cond, other=None): - values = self.view("i8") + values = self._data._ndarray try: other = self._data._validate_where_value(other) @@ -493,7 +493,7 @@ def where(self, cond, other=None): oth = getattr(other, "dtype", other) raise TypeError(f"Where requires matching dtype, not {oth}") from err - result = np.where(cond, values, other).astype("i8") + result = np.where(cond, values, other) arr = self._data._from_backing_data(result) return type(self)._simple_new(arr, name=self.name) @@ -610,7 +610,8 @@ def insert(self, loc: int, item): ------- new_index : Index """ - item = self._data._validate_insert_value(item) + value = self._data._validate_insert_value(item) + item = self._data._box_func(value) freq = None if is_period_dtype(self.dtype): @@ -630,10 +631,8 @@ def insert(self, loc: int, item): freq = self.freq arr = self._data - item = arr._unbox_scalar(item) - item = arr._rebox_native(item) - new_values = np.concatenate([arr._ndarray[:loc], [item], arr._ndarray[loc:]]) + new_values = np.concatenate([arr._ndarray[:loc], [value], arr._ndarray[loc:]]) new_arr = self._data._from_backing_data(new_values) new_arr._freq = freq
A couple more special cases remain to handle, after which we will always _rebox_native; at that point it can be rolled into _unbox so we can get rid of the method altogether.
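As a hedged sketch of the pattern being standardized — a stand-in class rather than the real pandas mixin; only the method names mirror the diff above, the rest is illustrative:

```python
import numpy as np
import pandas as pd


class DatetimeLikeStandIn:
    # Illustrative stand-in, NOT the real pandas datetimelike mixin.

    def _unbox(self, value, setitem: bool = False):
        # reduce a boxed scalar (e.g. Timestamp) to its raw int64 representation
        return value.value

    def _rebox_native(self, rv):
        # wrap the raw integer back into the array's native storage scalar
        return np.int64(rv)

    def _validate_setitem_value(self, value):
        # the now-uniform pattern: every _unbox is immediately followed by
        # _rebox_native, so the two can eventually be folded together
        rv = self._unbox(value, setitem=True)
        return self._rebox_native(rv)


print(DatetimeLikeStandIn()._validate_setitem_value(pd.Timestamp("2020-10-21")))
```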
https://api.github.com/repos/pandas-dev/pandas/pulls/37297
2020-10-21T01:21:34Z
2020-10-21T12:56:53Z
2020-10-21T12:56:53Z
2020-10-21T15:07:35Z
REF: de-duplicate DTA/TDA validators by standardizing exception messages
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f2a0173c0d593..7aa58e5268948 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -419,7 +419,7 @@ def _from_factorized(cls, values, original): # Validation Methods # TODO: try to de-duplicate these, ensure identical behavior - def _validate_comparison_value(self, other, opname: str): + def _validate_comparison_value(self, other): if isinstance(other, str): try: # GH#18435 strings get a pass from tzawareness compat @@ -429,7 +429,7 @@ def _validate_comparison_value(self, other, opname: str): raise InvalidComparison(other) if isinstance(other, self._recognized_scalars) or other is NaT: - other = self._scalar_type(other) # type: ignore[call-arg] + other = self._scalar_type(other) try: self._check_compatible_with(other) except TypeError as err: @@ -477,7 +477,7 @@ def _validate_fill_value(self, fill_value): f"Got '{str(fill_value)}'." ) try: - fill_value = self._validate_scalar(fill_value, msg) + fill_value = self._validate_scalar(fill_value) except TypeError as err: raise ValueError(msg) from err rv = self._unbox(fill_value) @@ -511,17 +511,16 @@ def _validate_shift_value(self, fill_value): rv = self._unbox(fill_value) return self._rebox_native(rv) - def _validate_scalar(self, value, msg: Optional[str] = None): + def _validate_scalar(self, value, allow_listlike: bool = False): """ Validate that the input value can be cast to our scalar_type. Parameters ---------- value : object - msg : str, optional. - Message to raise in TypeError on invalid input. - If not provided, `value` is cast to a str and used - as the message. + allow_listlike: bool, default False + When raising an exception, whether the message should say + listlike inputs are allowed. Returns ------- @@ -532,6 +531,7 @@ def _validate_scalar(self, value, msg: Optional[str] = None): try: value = self._scalar_from_string(value) except ValueError as err: + msg = self._validation_error_message(value, allow_listlike) raise TypeError(msg) from err elif is_valid_nat_for_dtype(value, self.dtype): @@ -543,12 +543,38 @@ def _validate_scalar(self, value, msg: Optional[str] = None): value = self._scalar_type(value) # type: ignore[call-arg] else: - if msg is None: - msg = str(value) + msg = self._validation_error_message(value, allow_listlike) raise TypeError(msg) return value + def _validation_error_message(self, value, allow_listlike: bool = False) -> str: + """ + Construct an exception message on validation error. + + Some methods allow only scalar inputs, while others allow either scalar + or listlike. + + Parameters + ---------- + allow_listlike: bool, default False + + Returns + ------- + str + """ + if allow_listlike: + msg = ( + f"value should be a '{self._scalar_type.__name__}', 'NaT', " + f"or array of those. Got '{type(value).__name__}' instead." + ) + else: + msg = ( + f"value should be a '{self._scalar_type.__name__}' or 'NaT'. " + f"Got '{type(value).__name__}' instead." 
+ ) + return msg + def _validate_listlike(self, value, allow_object: bool = False): if isinstance(value, type(self)): return value @@ -585,9 +611,8 @@ def _validate_listlike(self, value, allow_object: bool = False): return value def _validate_searchsorted_value(self, value): - msg = "searchsorted requires compatible dtype or scalar" if not is_list_like(value): - value = self._validate_scalar(value, msg) + value = self._validate_scalar(value, True) else: value = self._validate_listlike(value) @@ -595,29 +620,23 @@ def _validate_searchsorted_value(self, value): return self._rebox_native(rv) def _validate_setitem_value(self, value): - msg = ( - f"'value' should be a '{self._scalar_type.__name__}', 'NaT', " - f"or array of those. Got '{type(value).__name__}' instead." - ) if is_list_like(value): value = self._validate_listlike(value) else: - value = self._validate_scalar(value, msg) + value = self._validate_scalar(value, True) rv = self._unbox(value, setitem=True) return self._rebox_native(rv) def _validate_insert_value(self, value): - msg = f"cannot insert {type(self).__name__} with incompatible label" - value = self._validate_scalar(value, msg) + value = self._validate_scalar(value) rv = self._unbox(value, setitem=True) return self._rebox_native(rv) def _validate_where_value(self, other): - msg = f"Where requires matching dtype, not {type(other)}" if not is_list_like(other): - other = self._validate_scalar(other, msg) + other = self._validate_scalar(other, True) else: other = self._validate_listlike(other) @@ -847,7 +866,7 @@ def _cmp_method(self, other, op): return op(self.ravel(), other.ravel()).reshape(self.shape) try: - other = self._validate_comparison_value(other, f"__{op.__name__}__") + other = self._validate_comparison_value(other) except InvalidComparison: return invalid_comparison(self, other, op) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index ed7c7c31c6b8d..3d34948018be4 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -399,7 +399,7 @@ def test_setitem_raises(self): with pytest.raises(IndexError, match="index 12 is out of bounds"): arr[12] = val - with pytest.raises(TypeError, match="'value' should be a.* 'object'"): + with pytest.raises(TypeError, match="value should be a.* 'object'"): arr[0] = object() msg = "cannot set using a list-like indexer with a different length" @@ -1032,7 +1032,7 @@ def test_casting_nat_setitem_array(array, casting_nats): ) def test_invalid_nat_setitem_array(array, non_casting_nats): msg = ( - "'value' should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. " + "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. " "Got '(timedelta64|datetime64|int)' instead." ) diff --git a/pandas/tests/indexes/datetimes/test_insert.py b/pandas/tests/indexes/datetimes/test_insert.py index b4f6cc3798f4f..d2c999f61b4bb 100644 --- a/pandas/tests/indexes/datetimes/test_insert.py +++ b/pandas/tests/indexes/datetimes/test_insert.py @@ -21,7 +21,8 @@ def test_insert_nat(self, tz, null): @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) def test_insert_invalid_na(self, tz): idx = DatetimeIndex(["2017-01-01"], tz=tz) - with pytest.raises(TypeError, match="incompatible label"): + msg = "value should be a 'Timestamp' or 'NaT'. Got 'timedelta64' instead." 
+ with pytest.raises(TypeError, match=msg): idx.insert(0, np.timedelta64("NaT")) def test_insert_empty_preserves_freq(self, tz_naive_fixture): @@ -174,7 +175,7 @@ def test_insert_mismatched_types_raises(self, tz_aware_fixture, item): tz = tz_aware_fixture dti = date_range("2019-11-04", periods=9, freq="-1D", name=9, tz=tz) - msg = "incompatible label" + msg = "value should be a 'Timestamp' or 'NaT'. Got '.*' instead" with pytest.raises(TypeError, match=msg): dti.insert(1, item) diff --git a/pandas/tests/indexes/timedeltas/test_insert.py b/pandas/tests/indexes/timedeltas/test_insert.py index 1ebc0a4b1eca0..66fec2310e50c 100644 --- a/pandas/tests/indexes/timedeltas/test_insert.py +++ b/pandas/tests/indexes/timedeltas/test_insert.py @@ -79,7 +79,8 @@ def test_insert_nat(self, null): def test_insert_invalid_na(self): idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx") - with pytest.raises(TypeError, match="incompatible label"): + msg = r"value should be a 'Timedelta' or 'NaT'\. Got 'datetime64' instead\." + with pytest.raises(TypeError, match=msg): idx.insert(0, np.datetime64("NaT")) @pytest.mark.parametrize( @@ -89,7 +90,7 @@ def test_insert_mismatched_types_raises(self, item): # GH#33703 dont cast these to td64 tdi = TimedeltaIndex(["4day", "1day", "2day"], name="idx") - msg = "incompatible label" + msg = r"value should be a 'Timedelta' or 'NaT'\. Got '.*' instead\." with pytest.raises(TypeError, match=msg): tdi.insert(1, item) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 04790cdf6cc9d..d37b5986b57c1 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -444,7 +444,7 @@ def test_insert_index_datetimes(self, fill_val, exp_dtype): with pytest.raises(TypeError, match=msg): obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo")) - msg = "cannot insert DatetimeArray with incompatible label" + msg = "value should be a 'Timestamp' or 'NaT'. Got 'int' instead." with pytest.raises(TypeError, match=msg): obj.insert(1, 1) @@ -461,12 +461,12 @@ def test_insert_index_timedelta64(self): ) # ToDo: must coerce to object - msg = "cannot insert TimedeltaArray with incompatible label" + msg = "value should be a 'Timedelta' or 'NaT'. Got 'Timestamp' instead." with pytest.raises(TypeError, match=msg): obj.insert(1, pd.Timestamp("2012-01-01")) # ToDo: must coerce to object - msg = "cannot insert TimedeltaArray with incompatible label" + msg = "value should be a 'Timedelta' or 'NaT'. Got 'int' instead." with pytest.raises(TypeError, match=msg): obj.insert(1, 1) diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 45c2725c26526..b1928de69ea0f 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -335,7 +335,7 @@ def test_partial_set_invalid(self): df = orig.copy() # don't allow not string inserts - msg = "cannot insert DatetimeArray with incompatible label" + msg = r"value should be a 'Timestamp' or 'NaT'\. Got '.*' instead\." 
with pytest.raises(TypeError, match=msg): df.loc[100.0, :] = df.iloc[0] diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 90f3a392878d9..6a83c69785c90 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1095,7 +1095,7 @@ def test_datetime_block_can_hold_element(self): assert not block._can_hold_element(val) msg = ( - "'value' should be a 'Timestamp', 'NaT', " + "value should be a 'Timestamp', 'NaT', " "or array of those. Got 'date' instead." ) with pytest.raises(TypeError, match=msg):
After this, _validate_setitem_value is the same as _validate_where_value, and we are close to having _validate_insert_value and _validate_searchsorted_value match too.
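The standardized messages are plain f-strings; here is a self-contained copy of the helper added above, rewritten as a free function for illustration (the wording is taken verbatim from the diff):

```python
import pandas as pd


def validation_error_message(scalar_type: type, value, allow_listlike: bool = False) -> str:
    # wording copied from _validation_error_message in the diff above
    if allow_listlike:
        return (
            f"value should be a '{scalar_type.__name__}', 'NaT', "
            f"or array of those. Got '{type(value).__name__}' instead."
        )
    return (
        f"value should be a '{scalar_type.__name__}' or 'NaT'. "
        f"Got '{type(value).__name__}' instead."
    )


print(validation_error_message(pd.Timestamp, 1))
# value should be a 'Timestamp' or 'NaT'. Got 'int' instead.
```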
https://api.github.com/repos/pandas-dev/pandas/pulls/37293
2020-10-20T23:21:40Z
2020-10-22T00:07:52Z
2020-10-22T00:07:52Z
2020-10-22T01:19:21Z
DOC: fix a small typo
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst index eb7ee000a9a86..6f6eeada0cfed 100644 --- a/doc/source/getting_started/index.rst +++ b/doc/source/getting_started/index.rst @@ -533,7 +533,7 @@ pandas has great support for time series and has an extensive set of tools for w <div id="collapseTen" class="collapse" data-parent="#accordion"> <div class="card-body"> -Data sets do not only contain numerical data. pandas provides a wide range of functions to cleaning textual data and extract useful information from it. +Data sets do not only contain numerical data. pandas provides a wide range of functions to clean textual data and extract useful information from it. .. raw:: html
An alternative fix would be "pandas provides a wide range of functions for cleaning textual data and extracting useful information from it." - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37291
2020-10-20T22:26:42Z
2020-10-20T22:42:30Z
2020-10-20T22:42:30Z
2022-07-15T23:39:30Z
CLN: make PeriodArray signature match DTA/TDA
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b4d1787697973..103008f4a7c8c 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -516,7 +516,7 @@ ExtensionArray - Fixed Bug where :class:`DataFrame` column set to scalar extension type via a dict instantion was considered an object type rather than the extension type (:issue:`35965`) - Fixed bug where ``astype()`` with equal dtype and ``copy=False`` would return a new object (:issue:`284881`) - Fixed bug when applying a NumPy ufunc with multiple outputs to a :class:`pandas.arrays.IntegerArray` returning None (:issue:`36913`) - +- Fixed an inconsistency in :class:`PeriodArray`'s ``__init__`` signature to those of :class:`DatetimeArray` and :class:`TimedeltaArray` (:issue:`37289`) Other ^^^^^ diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index bf2b3a0a1c9ba..ba2048a496ef8 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -76,14 +76,14 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps): converted to ordinals without inference or copy (PeriodArray, ndarray[int64]), or a box around such an array (Series[period], PeriodIndex). + dtype : PeriodDtype, optional + A PeriodDtype instance from which to extract a `freq`. If both + `freq` and `dtype` are specified, then the frequencies must match. freq : str or DateOffset The `freq` to use for the array. Mostly applicable when `values` is an ndarray of integers, when `freq` is required. When `values` is a PeriodArray (or box around), it's checked that ``values.freq`` matches `freq`. - dtype : PeriodDtype, optional - A PeriodDtype instance from which to extract a `freq`. If both - `freq` and `dtype` are specified, then the frequencies must match. copy : bool, default False Whether to copy the ordinals before storing. @@ -148,7 +148,7 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps): # -------------------------------------------------------------------- # Constructors - def __init__(self, values, freq=None, dtype=None, copy=False): + def __init__(self, values, dtype=None, freq=None, copy=False): freq = validate_dtype_freq(dtype, freq) if freq is not None: @@ -882,7 +882,7 @@ def period_array( if is_datetime64_dtype(data_dtype): return PeriodArray._from_datetime64(data, freq) if is_period_dtype(data_dtype): - return PeriodArray(data, freq) + return PeriodArray(data, freq=freq) # other iterable of some kind if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4f92bb7bd7a87..41968c5972ea5 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -214,7 +214,7 @@ def __new__( if data is None and ordinal is not None: # we strangely ignore `ordinal` if data is passed. ordinal = np.asarray(ordinal, dtype=np.int64) - data = PeriodArray(ordinal, freq) + data = PeriodArray(ordinal, freq=freq) else: # don't pass copy here, since we copy later. data = period_array(data=data, freq=freq)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
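A hedged sketch of what the reordered signature means for callers, based on the whatsnew entry and the `__init__` change in the diff above (construction details beyond the argument order are assumptions about the 1.x API):

```python
# PeriodArray.__init__ is now (values, dtype=None, freq=None, copy=False),
# matching DatetimeArray/TimedeltaArray, so the second *positional* slot is
# dtype. Passing freq by keyword keeps code working across the change.
import numpy as np
from pandas.arrays import PeriodArray

ordinals = np.arange(4, dtype=np.int64)  # integer ordinals relative to 1970-01
arr = PeriodArray(ordinals, freq="M")    # keyword, not PeriodArray(ordinals, "M")
print(arr)
```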
https://api.github.com/repos/pandas-dev/pandas/pulls/37289
2020-10-20T21:46:25Z
2020-10-21T12:55:26Z
2020-10-21T12:55:26Z
2020-10-21T15:19:09Z
Fix regression in offsets that caused them to no longer be hashable
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index c3868fd147974..043b817bb9026 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :class:`RollingGroupby` causing a segmentation fault with Index of dtype object (:issue:`36727`) - Fixed regression in :meth:`DataFrame.resample(...).apply(...)` raised ``AttributeError`` when input was a :class:`DataFrame` and only a :class:`Series` was evaluated (:issue:`36951`) - Fixed regression in :class:`PeriodDtype` comparing both equal and unequal to its string representation (:issue:`37265`) +- Fixed regression in certain offsets (:meth:`pd.offsets.Day() <pandas.tseries.offsets.Day>` and below) no longer being hashable (:issue:`37267`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 101e86bb37912..98b2ddbd21ee1 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -791,6 +791,11 @@ cdef class Tick(SingleConstructorOffset): def is_anchored(self) -> bool: return False + # This is identical to BaseOffset.__hash__, but has to be redefined here + # for Python 3, because we've redefined __eq__. + def __hash__(self) -> int: + return hash(self._params) + # -------------------------------------------------------------------- # Comparison and Arithmetic Methods diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 6a87c05384689..922aff1792227 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -678,6 +678,11 @@ def test_isAnchored_deprecated(self, offset_types): expected = off.is_anchored() assert result == expected + def test_offsets_hashable(self, offset_types): + # GH: 37267 + off = self._get_offset(offset_types) + assert hash(off) is not None + class TestDateOffset(Base): def setup_method(self, method):
- [x] closes #37267 - [x] tests added / passed Not quite sure if this test is sufficient; if so, I would add tests for other functions. - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
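A minimal check mirroring the new `test_offsets_hashable` test (a sketch, not part of the PR; the dict usage is just an illustration of why hashability matters):

```python
# After the fix, Tick subclasses such as Day define __hash__ again (it was
# lost when __eq__ was redefined), so offsets can be used as dict keys.
import pandas as pd

off = pd.offsets.Day()
assert hash(off) is not None       # raised TypeError on the regressed versions
schedule = {off: "daily"}
print(schedule[pd.offsets.Day()])  # equal offsets hash equally -> "daily"
```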
https://api.github.com/repos/pandas-dev/pandas/pulls/37288
2020-10-20T21:25:29Z
2020-10-22T00:08:34Z
2020-10-22T00:08:33Z
2020-10-22T21:42:02Z
Backport PR #37256 on branch 1.1.x (BUG: with integer column labels, .info() throws KeyError)
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index ad59711b90f6e..7dd660374a6fc 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -30,6 +30,7 @@ Bug fixes - Bug causing ``groupby(...).sum()`` and similar to not preserve metadata (:issue:`29442`) - Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` raising a ``ValueError`` when the target was read-only (:issue:`37174`) - Bug in :meth:`GroupBy.fillna` that introduced a performance regression after 1.0.5 (:issue:`36757`) +- Bug in :meth:`DataFrame.info` was raising a ``KeyError`` when the DataFrame has integer column names (:issue:`37245`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index 7a53b46a4ac0f..db6704f7a96a4 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -332,13 +332,13 @@ def _verbose_repr( ) for i, col in enumerate(ids): - dtype = dtypes[i] + dtype = dtypes.iloc[i] col = pprint_thing(col) line_no = _put_str(f" {i}", space_num) count = "" if show_counts: - count = counts[i] + count = counts.iloc[i] lines.append( line_no diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py index 877bd1650ae60..5ef2ce9c47236 100644 --- a/pandas/tests/io/formats/test_info.py +++ b/pandas/tests/io/formats/test_info.py @@ -403,3 +403,25 @@ def test_info_categorical(): buf = StringIO() df.info(buf=buf) + + +def test_info_int_columns(): + # GH#37245 + df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"]) + buf = StringIO() + df.info(null_counts=True, buf=buf) + result = buf.getvalue() + expected = textwrap.dedent( + """\ + <class 'pandas.core.frame.DataFrame'> + Index: 2 entries, A to B + Data columns (total 2 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 1 2 non-null int64 + 1 2 2 non-null int64 + dtypes: int64(2) + memory usage: 48.0+ bytes + """ + ) + assert result == expected
Backport PR #37256: BUG: with integer column labels, .info() throws KeyError
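A reproduction sketch taken from the regression test in the diff (`test_info_int_columns`); the comment about the failing call describes the documented bug, not new behavior:

```python
# DataFrame.info() indexed the dtypes/counts Series with [] (label-based),
# which raised a KeyError when column labels were integers; the fix switches
# to positional .iloc access.
from io import StringIO
import pandas as pd

df = pd.DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"])
buf = StringIO()
df.info(buf=buf)  # raised a KeyError before this backport
print(buf.getvalue())
```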
https://api.github.com/repos/pandas-dev/pandas/pulls/37285
2020-10-20T19:48:53Z
2020-10-21T10:51:54Z
2020-10-21T10:51:54Z
2020-10-21T10:51:54Z
DOC: update the URL for the "Mailing List"
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index 4aba8f709fba0..c6deb4b7ea383 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -17,7 +17,7 @@ pandas documentation `Source Repository <https://github.com/pandas-dev/pandas>`__ | `Issues & Ideas <https://github.com/pandas-dev/pandas/issues>`__ | `Q&A Support <https://stackoverflow.com/questions/tagged/pandas>`__ | -`Mailing List <https://groups.google.com/forum/#!forum/pydata>`__ +`Mailing List <https://groups.google.com/g/pydata>`__ :mod:`pandas` is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37284
2020-10-20T19:17:26Z
2020-10-20T23:02:15Z
2020-10-20T23:02:15Z
2022-07-15T23:39:32Z
REF/TST: collect reindex tests
diff --git a/pandas/tests/frame/methods/test_align.py b/pandas/tests/frame/methods/test_align.py index d19b59debfdea..86639065ba5c2 100644 --- a/pandas/tests/frame/methods/test_align.py +++ b/pandas/tests/frame/methods/test_align.py @@ -1,12 +1,39 @@ import numpy as np import pytest +import pytz import pandas as pd -from pandas import DataFrame, Index, Series +from pandas import DataFrame, Index, Series, date_range import pandas._testing as tm class TestDataFrameAlign: + def test_frame_align_aware(self): + idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") + idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern") + df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) + df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) + new1, new2 = df1.align(df2) + assert df1.index.tz == new1.index.tz + assert df2.index.tz == new2.index.tz + + # different timezones convert to UTC + + # frame with frame + df1_central = df1.tz_convert("US/Central") + new1, new2 = df1.align(df1_central) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + # frame with Series + new1, new2 = df1.align(df1_central[0], axis=0) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + df1[0].align(df1_central, axis=0) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + def test_align_float(self, float_frame): af, bf = float_frame.align(float_frame) assert af._mgr is not float_frame._mgr diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/methods/test_reindex.py similarity index 92% rename from pandas/tests/frame/test_axis_select_reindex.py rename to pandas/tests/frame/methods/test_reindex.py index 12945533b17ae..99a3bbdf5ffe3 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -4,7 +4,7 @@ import pytest import pandas as pd -from pandas import Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna +from pandas import Categorical, DataFrame, Index, Series, date_range, isna import pandas._testing as tm @@ -12,30 +12,6 @@ class TestDataFrameSelectReindex: # These are specific reindex-based tests; other indexing tests should go in # test_indexing - def test_merge_join_different_levels(self): - # GH 9455 - - # first dataframe - df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]]) - - # second dataframe - columns = MultiIndex.from_tuples([("a", ""), ("c", "c1")]) - df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]]) - - # merge - columns = ["a", "b", ("c", "c1")] - expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]]) - with tm.assert_produces_warning(UserWarning): - result = pd.merge(df1, df2, on="a") - tm.assert_frame_equal(result, expected) - - # join, see discussion in GH 12219 - columns = ["a", "b", ("a", ""), ("c", "c1")] - expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]]) - with tm.assert_produces_warning(UserWarning): - result = df1.join(df2, on="a") - tm.assert_frame_equal(result, expected) - def test_reindex(self, float_frame): datetime_series = tm.makeTimeSeries(nper=30) @@ -382,20 +358,6 @@ def test_reindex_api_equivalence(self): for res in [res2, res3]: tm.assert_frame_equal(res1, res) - def test_align_int_fill_bug(self): - # GH #910 - X = np.arange(10 * 10, dtype="float64").reshape(10, 10) - Y = np.ones((10, 1), dtype=int) - - df1 = DataFrame(X) - df1["0.X"] = Y.squeeze() - - df2 = df1.astype(float) - - result = df1 - df1.mean() - expected = df2 - df2.mean() - 
tm.assert_frame_equal(result, expected) - def test_reindex_boolean(self): frame = DataFrame( np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2] diff --git a/pandas/tests/frame/methods/test_values.py b/pandas/tests/frame/methods/test_values.py new file mode 100644 index 0000000000000..cd6c5da8dd3a0 --- /dev/null +++ b/pandas/tests/frame/methods/test_values.py @@ -0,0 +1,53 @@ +import numpy as np + +from pandas import DataFrame, Timestamp, date_range +import pandas._testing as tm + + +class TestDataFrameValues: + def test_values_duplicates(self): + df = DataFrame( + [[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"] + ) + + result = df.values + expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object) + + tm.assert_numpy_array_equal(result, expected) + + def test_frame_values_with_tz(self): + tz = "US/Central" + df = DataFrame({"A": date_range("2000", periods=4, tz=tz)}) + result = df.values + expected = np.array( + [ + [Timestamp("2000-01-01", tz=tz)], + [Timestamp("2000-01-02", tz=tz)], + [Timestamp("2000-01-03", tz=tz)], + [Timestamp("2000-01-04", tz=tz)], + ] + ) + tm.assert_numpy_array_equal(result, expected) + + # two columns, homogenous + + df["B"] = df["A"] + result = df.values + expected = np.concatenate([expected, expected], axis=1) + tm.assert_numpy_array_equal(result, expected) + + # three columns, heterogeneous + est = "US/Eastern" + df["C"] = df["A"].dt.tz_convert(est) + + new = np.array( + [ + [Timestamp("2000-01-01T01:00:00", tz=est)], + [Timestamp("2000-01-02T01:00:00", tz=est)], + [Timestamp("2000-01-03T01:00:00", tz=est)], + [Timestamp("2000-01-04T01:00:00", tz=est)], + ] + ) + expected = np.concatenate([expected, new], axis=1) + result = df.values + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 2c04473d50851..de56625209160 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1493,6 +1493,20 @@ def test_dunder_methods_binary(self, all_arithmetic_operators): with pytest.raises(TypeError, match="takes 2 positional arguments"): getattr(df, all_arithmetic_operators)(b, 0) + def test_align_int_fill_bug(self): + # GH#910 + X = np.arange(10 * 10, dtype="float64").reshape(10, 10) + Y = np.ones((10, 1), dtype=int) + + df1 = DataFrame(X) + df1["0.X"] = Y.squeeze() + + df2 = df1.astype(float) + + result = df1 - df1.mean() + expected = df2 - df2.mean() + tm.assert_frame_equal(result, expected) + def test_pow_with_realignment(): # GH#32685 pow has special semantics for operating with null values diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index 4d6e675c6765f..07cd307c8cc54 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -4,7 +4,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Index, period_range +from pandas import DataFrame, Index, MultiIndex, period_range import pandas._testing as tm @@ -292,3 +292,27 @@ def test_join_multiindex_leftright(self): tm.assert_frame_equal(df1.join(df2, how="right"), exp) tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]]) + + def test_merge_join_different_levels(self): + # GH#9455 + + # first dataframe + df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]]) + + # second dataframe + columns = MultiIndex.from_tuples([("a", ""), ("c", "c1")]) + df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]]) + + # merge + columns = ["a", "b", 
("c", "c1")] + expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]]) + with tm.assert_produces_warning(UserWarning): + result = pd.merge(df1, df2, on="a") + tm.assert_frame_equal(result, expected) + + # join, see discussion in GH#12219 + columns = ["a", "b", ("a", ""), ("c", "c1")] + expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]]) + with tm.assert_produces_warning(UserWarning): + result = df1.join(df2, on="a") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index a8b76f4d85f49..c5b923f9a0c1c 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -488,16 +488,6 @@ def test_columns_with_dups(self): xp.columns = ["A", "A", "B"] tm.assert_frame_equal(rs, xp) - def test_values_duplicates(self): - df = DataFrame( - [[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"] - ) - - result = df.values - expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object) - - tm.assert_numpy_array_equal(result, expected) - def test_set_value_by_index(self): # See gh-12344 df = DataFrame(np.arange(9).reshape(3, 3).T) diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index dfd4fb1855383..bb4e7a157f53e 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -3,7 +3,6 @@ """ import numpy as np import pytest -import pytz from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ -14,43 +13,6 @@ class TestDataFrameTimezones: - def test_frame_values_with_tz(self): - tz = "US/Central" - df = DataFrame({"A": date_range("2000", periods=4, tz=tz)}) - result = df.values - expected = np.array( - [ - [pd.Timestamp("2000-01-01", tz=tz)], - [pd.Timestamp("2000-01-02", tz=tz)], - [pd.Timestamp("2000-01-03", tz=tz)], - [pd.Timestamp("2000-01-04", tz=tz)], - ] - ) - tm.assert_numpy_array_equal(result, expected) - - # two columns, homogenous - - df = df.assign(B=df.A) - result = df.values - expected = np.concatenate([expected, expected], axis=1) - tm.assert_numpy_array_equal(result, expected) - - # three columns, heterogeneous - est = "US/Eastern" - df = df.assign(C=df.A.dt.tz_convert(est)) - - new = np.array( - [ - [pd.Timestamp("2000-01-01T01:00:00", tz=est)], - [pd.Timestamp("2000-01-02T01:00:00", tz=est)], - [pd.Timestamp("2000-01-03T01:00:00", tz=est)], - [pd.Timestamp("2000-01-04T01:00:00", tz=est)], - ] - ) - expected = np.concatenate([expected, new], axis=1) - result = df.values - tm.assert_numpy_array_equal(result, expected) - def test_frame_join_tzaware(self): test1 = DataFrame( np.zeros((6, 3)), @@ -72,32 +34,6 @@ def test_frame_join_tzaware(self): tm.assert_index_equal(result.index, ex_index) assert result.index.tz.zone == "US/Central" - def test_frame_align_aware(self): - idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") - idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern") - df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) - df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) - new1, new2 = df1.align(df2) - assert df1.index.tz == new1.index.tz - assert df2.index.tz == new2.index.tz - - # different timezones convert to UTC - - # frame with frame - df1_central = df1.tz_convert("US/Central") - new1, new2 = df1.align(df1_central) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - # frame with Series - new1, new2 = df1.align(df1_central[0], axis=0) - assert 
new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - df1[0].align(df1_central, axis=0) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_frame_no_datetime64_dtype(self, tz): # after GH#7822
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37283
2020-10-20T19:04:59Z
2020-10-21T00:45:24Z
2020-10-21T00:45:24Z
2020-10-21T00:45:32Z
REF/TST: collect astype tests
diff --git a/pandas/tests/frame/methods/test_convert_dtypes.py b/pandas/tests/frame/methods/test_convert_dtypes.py new file mode 100644 index 0000000000000..cb0da59bc1afa --- /dev/null +++ b/pandas/tests/frame/methods/test_convert_dtypes.py @@ -0,0 +1,28 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestConvertDtypes: + @pytest.mark.parametrize( + "convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")] + ) + def test_convert_dtypes(self, convert_integer, expected): + # Specific types are tested in tests/series/test_dtypes.py + # Just check that it works for DataFrame here + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), + "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), + } + ) + result = df.convert_dtypes(True, True, convert_integer, False) + expected = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=expected), + "b": pd.Series(["x", "y", "z"], dtype="string"), + } + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index d44c62e1defc7..3e7bdee414c69 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -245,27 +245,6 @@ def test_str_to_small_float_conversion_type(self): expected = DataFrame(col_data, columns=["A"], dtype=float) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")] - ) - def test_convert_dtypes(self, convert_integer, expected): - # Specific types are tested in tests/series/test_dtypes.py - # Just check that it works for DataFrame here - df = DataFrame( - { - "a": Series([1, 2, 3], dtype=np.dtype("int32")), - "b": Series(["x", "y", "z"], dtype=np.dtype("O")), - } - ) - result = df.convert_dtypes(True, True, convert_integer, False) - expected = DataFrame( - { - "a": Series([1, 2, 3], dtype=expected), - "b": Series(["x", "y", "z"], dtype="string"), - } - ) - tm.assert_frame_equal(result, expected) - class TestDataFrameDatetimeWithTZ: def test_interleave(self, timezone_frame): diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index eea839c380f0b..dc9edccb640b5 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -1,11 +1,97 @@ +from datetime import datetime, timedelta +from importlib import reload +import string +import sys + import numpy as np import pytest -from pandas import NA, Interval, Series, Timestamp, date_range +from pandas._libs.tslibs import iNaT + +from pandas import ( + NA, + Categorical, + CategoricalDtype, + Index, + Interval, + Series, + Timedelta, + Timestamp, + date_range, +) import pandas._testing as tm +class TestAstypeAPI: + def test_arg_for_errors_in_astype(self): + # see GH#14878 + ser = Series([1, 2, 3]) + + msg = ( + r"Expected value of kwarg 'errors' to be one of \['raise', " + r"'ignore'\]\. 
Supplied value is 'False'" + ) + with pytest.raises(ValueError, match=msg): + ser.astype(np.float64, errors=False) + + ser.astype(np.int8, errors="raise") + + @pytest.mark.parametrize("dtype_class", [dict, Series]) + def test_astype_dict_like(self, dtype_class): + # see GH#7271 + ser = Series(range(0, 10, 2), name="abc") + + dt1 = dtype_class({"abc": str}) + result = ser.astype(dt1) + expected = Series(["0", "2", "4", "6", "8"], name="abc") + tm.assert_series_equal(result, expected) + + dt2 = dtype_class({"abc": "float64"}) + result = ser.astype(dt2) + expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc") + tm.assert_series_equal(result, expected) + + dt3 = dtype_class({"abc": str, "def": str}) + msg = ( + "Only the Series name can be used for the key in Series dtype " + r"mappings\." + ) + with pytest.raises(KeyError, match=msg): + ser.astype(dt3) + + dt4 = dtype_class({0: str}) + with pytest.raises(KeyError, match=msg): + ser.astype(dt4) + + # GH#16717 + # if dtypes provided is empty, it should error + if dtype_class is Series: + dt5 = dtype_class({}, dtype=object) + else: + dt5 = dtype_class({}) + + with pytest.raises(KeyError, match=msg): + ser.astype(dt5) + + class TestAstype: + @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) + def test_astype_generic_timestamp_no_frequency(self, dtype, request): + # see GH#15524, GH#15987 + data = [1] + s = Series(data) + + if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: + mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") + request.node.add_marker(mark) + + msg = ( + fr"The '{dtype.__name__}' dtype has no unit\. " + fr"Please pass in '{dtype.__name__}\[ns\]' instead." + ) + with pytest.raises(ValueError, match=msg): + s.astype(dtype) + def test_astype_dt64_to_str(self): # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex dti = date_range("2012-01-01", periods=3) @@ -27,6 +113,87 @@ def test_astype_dt64tz_to_str(self): ) tm.assert_series_equal(result, expected) + def test_astype_datetime(self): + s = Series(iNaT, dtype="M8[ns]", index=range(5)) + + s = s.astype("O") + assert s.dtype == np.object_ + + s = Series([datetime(2001, 1, 2, 0, 0)]) + + s = s.astype("O") + assert s.dtype == np.object_ + + s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)]) + + s[1] = np.nan + assert s.dtype == "M8[ns]" + + s = s.astype("O") + assert s.dtype == np.object_ + + def test_astype_datetime64tz(self): + s = Series(date_range("20130101", periods=3, tz="US/Eastern")) + + # astype + result = s.astype(object) + expected = Series(s.astype(object), dtype=object) + tm.assert_series_equal(result, expected) + + result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz) + tm.assert_series_equal(result, s) + + # astype - object, preserves on construction + result = Series(s.astype(object)) + expected = s.astype(object) + tm.assert_series_equal(result, expected) + + # astype - datetime64[ns, tz] + result = Series(s.values).astype("datetime64[ns, US/Eastern]") + tm.assert_series_equal(result, s) + + result = Series(s.values).astype(s.dtype) + tm.assert_series_equal(result, s) + + result = s.astype("datetime64[ns, CET]") + expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET")) + tm.assert_series_equal(result, expected) + + def test_astype_str_cast_dt64(self): + # see GH#9757 + ts = Series([Timestamp("2010-01-04 00:00:00")]) + s = ts.astype(str) + + expected = Series(["2010-01-04"]) + tm.assert_series_equal(s, expected) + + ts = Series([Timestamp("2010-01-04 
00:00:00", tz="US/Eastern")]) + s = ts.astype(str) + + expected = Series(["2010-01-04 00:00:00-05:00"]) + tm.assert_series_equal(s, expected) + + def test_astype_str_cast_td64(self): + # see GH#9757 + + td = Series([Timedelta(1, unit="d")]) + ser = td.astype(str) + + expected = Series(["1 days"]) + tm.assert_series_equal(ser, expected) + + def test_dt64_series_astype_object(self): + dt64ser = Series(date_range("20130101", periods=3)) + result = dt64ser.astype(object) + assert isinstance(result.iloc[0], datetime) + assert result.dtype == np.object_ + + def test_td64_series_astype_object(self): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]") + result = tdser.astype(object) + assert isinstance(result.iloc[0], timedelta) + assert result.dtype == np.object_ + @pytest.mark.parametrize( "values", [ @@ -70,3 +237,122 @@ def test_astype_to_str_preserves_na(self, value, string_value): result = s.astype(str) expected = Series(["a", "b", string_value], dtype=object) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"]) + def test_astype(self, dtype): + s = Series(np.random.randn(5), name="foo") + as_typed = s.astype(dtype) + + assert as_typed.dtype == dtype + assert as_typed.name == s.name + + @pytest.mark.parametrize("value", [np.nan, np.inf]) + @pytest.mark.parametrize("dtype", [np.int32, np.int64]) + def test_astype_cast_nan_inf_int(self, dtype, value): + # gh-14265: check NaN and inf raise error when converting to int + msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" + s = Series([value]) + + with pytest.raises(ValueError, match=msg): + s.astype(dtype) + + @pytest.mark.parametrize("dtype", [int, np.int8, np.int64]) + def test_astype_cast_object_int_fail(self, dtype): + arr = Series(["car", "house", "tree", "1"]) + msg = r"invalid literal for int\(\) with base 10: 'car'" + with pytest.raises(ValueError, match=msg): + arr.astype(dtype) + + def test_astype_cast_object_int(self): + arr = Series(["1", "2", "3", "4"], dtype=object) + result = arr.astype(int) + + tm.assert_series_equal(result, Series(np.arange(1, 5))) + + def test_astype_unicode(self): + # see GH#7758: A bit of magic is required to set + # default encoding to utf-8 + digits = string.digits + test_series = [ + Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]), + Series(["データーサイエンス、お前はもう死んでいる"]), + ] + + former_encoding = None + + if sys.getdefaultencoding() == "utf-8": + test_series.append(Series(["野菜食べないとやばい".encode()])) + + for s in test_series: + res = s.astype("unicode") + expec = s.map(str) + tm.assert_series_equal(res, expec) + + # Restore the former encoding + if former_encoding is not None and former_encoding != "utf-8": + reload(sys) + sys.setdefaultencoding(former_encoding) + + +class TestAstypeCategorical: + def test_astype_categoricaldtype(self): + s = Series(["a", "b", "a"]) + result = s.astype(CategoricalDtype(["a", "b"], ordered=True)) + expected = Series(Categorical(["a", "b", "a"], ordered=True)) + tm.assert_series_equal(result, expected) + + result = s.astype(CategoricalDtype(["a", "b"], ordered=False)) + expected = Series(Categorical(["a", "b", "a"], ordered=False)) + tm.assert_series_equal(result, expected) + + result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False)) + expected = Series( + Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False) + ) + tm.assert_series_equal(result, expected) + tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"])) + + 
@pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("series_ordered", [True, False]) + def test_astype_categorical_to_categorical( + self, name, dtype_ordered, series_ordered + ): + # GH#10696, GH#18593 + s_data = list("abcaacbab") + s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered) + s = Series(s_data, dtype=s_dtype, name=name) + + # unspecified categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = s.astype(dtype) + exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered) + expected = Series(s_data, name=name, dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + # different categories + dtype = CategoricalDtype(list("adc"), dtype_ordered) + result = s.astype(dtype) + expected = Series(s_data, name=name, dtype=dtype) + tm.assert_series_equal(result, expected) + + if dtype_ordered is False: + # not specifying ordered, so only test once + expected = s + result = s.astype("category") + tm.assert_series_equal(result, expected) + + def test_astype_bool_missing_to_categorical(self): + # GH-19182 + s = Series([True, False, np.nan]) + assert s.dtypes == np.object_ + + result = s.astype(CategoricalDtype(categories=[True, False])) + expected = Series(Categorical([True, False, np.nan], categories=[True, False])) + tm.assert_series_equal(result, expected) + + def test_astype_categories_raises(self): + # deprecated GH#17636, removed in GH#27141 + s = Series(["a", "b", "a"]) + with pytest.raises(TypeError, match="got an unexpected"): + s.astype("category", categories=["a", "b"], ordered=True) diff --git a/pandas/tests/series/methods/test_infer_objects.py b/pandas/tests/series/methods/test_infer_objects.py new file mode 100644 index 0000000000000..bb83f62f5ebb5 --- /dev/null +++ b/pandas/tests/series/methods/test_infer_objects.py @@ -0,0 +1,23 @@ +import numpy as np + +from pandas import Series +import pandas._testing as tm + + +class TestInferObjects: + def test_infer_objects_series(self): + # GH#11221 + actual = Series(np.array([1, 2, 3], dtype="O")).infer_objects() + expected = Series([1, 2, 3]) + tm.assert_series_equal(actual, expected) + + actual = Series(np.array([1, 2, 3, None], dtype="O")).infer_objects() + expected = Series([1.0, 2.0, 3.0, np.nan]) + tm.assert_series_equal(actual, expected) + + # only soft conversions, unconvertable pass thru unchanged + actual = Series(np.array([1, 2, 3, None, "a"], dtype="O")).infer_objects() + expected = Series([1, 2, 3, None, "a"]) + + assert actual.dtype == "object" + tm.assert_series_equal(actual, expected) diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 29c1728be786a..b85a53960b0f6 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -1,123 +1,21 @@ -from datetime import datetime, timedelta -from importlib import reload import string -import sys import numpy as np import pytest -from pandas._libs.tslibs import iNaT - from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd -from pandas import ( - Categorical, - DataFrame, - Index, - Series, - Timedelta, - Timestamp, - date_range, -) +from pandas import Categorical, DataFrame, Series, date_range import pandas._testing as tm class TestSeriesDtypes: - def test_dt64_series_astype_object(self): - dt64ser = Series(date_range("20130101", periods=3)) - result = dt64ser.astype(object) - assert isinstance(result.iloc[0], datetime) - assert result.dtype == np.object_ - - def 
test_td64_series_astype_object(self): - tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]") - result = tdser.astype(object) - assert isinstance(result.iloc[0], timedelta) - assert result.dtype == np.object_ - - @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"]) - def test_astype(self, dtype): - s = Series(np.random.randn(5), name="foo") - as_typed = s.astype(dtype) - - assert as_typed.dtype == dtype - assert as_typed.name == s.name - def test_dtype(self, datetime_series): assert datetime_series.dtype == np.dtype("float64") assert datetime_series.dtypes == np.dtype("float64") - @pytest.mark.parametrize("value", [np.nan, np.inf]) - @pytest.mark.parametrize("dtype", [np.int32, np.int64]) - def test_astype_cast_nan_inf_int(self, dtype, value): - # gh-14265: check NaN and inf raise error when converting to int - msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" - s = Series([value]) - - with pytest.raises(ValueError, match=msg): - s.astype(dtype) - - @pytest.mark.parametrize("dtype", [int, np.int8, np.int64]) - def test_astype_cast_object_int_fail(self, dtype): - arr = Series(["car", "house", "tree", "1"]) - msg = r"invalid literal for int\(\) with base 10: 'car'" - with pytest.raises(ValueError, match=msg): - arr.astype(dtype) - - def test_astype_cast_object_int(self): - arr = Series(["1", "2", "3", "4"], dtype=object) - result = arr.astype(int) - - tm.assert_series_equal(result, Series(np.arange(1, 5))) - - def test_astype_datetime(self): - s = Series(iNaT, dtype="M8[ns]", index=range(5)) - - s = s.astype("O") - assert s.dtype == np.object_ - - s = Series([datetime(2001, 1, 2, 0, 0)]) - - s = s.astype("O") - assert s.dtype == np.object_ - - s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)]) - - s[1] = np.nan - assert s.dtype == "M8[ns]" - - s = s.astype("O") - assert s.dtype == np.object_ - - def test_astype_datetime64tz(self): - s = Series(date_range("20130101", periods=3, tz="US/Eastern")) - - # astype - result = s.astype(object) - expected = Series(s.astype(object), dtype=object) - tm.assert_series_equal(result, expected) - - result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz) - tm.assert_series_equal(result, s) - - # astype - object, preserves on construction - result = Series(s.astype(object)) - expected = s.astype(object) - tm.assert_series_equal(result, expected) - - # astype - datetime64[ns, tz] - result = Series(s.values).astype("datetime64[ns, US/Eastern]") - tm.assert_series_equal(result, s) - - result = Series(s.values).astype(s.dtype) - tm.assert_series_equal(result, s) - - result = s.astype("datetime64[ns, CET]") - expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET")) - tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("dtype", [str, np.str_]) @pytest.mark.parametrize( "series", @@ -132,96 +30,6 @@ def test_astype_str_map(self, dtype, series): expected = series.map(str) tm.assert_series_equal(result, expected) - def test_astype_str_cast_dt64(self): - # see gh-9757 - ts = Series([Timestamp("2010-01-04 00:00:00")]) - s = ts.astype(str) - - expected = Series(["2010-01-04"]) - tm.assert_series_equal(s, expected) - - ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")]) - s = ts.astype(str) - - expected = Series(["2010-01-04 00:00:00-05:00"]) - tm.assert_series_equal(s, expected) - - def test_astype_str_cast_td64(self): - # see gh-9757 - - td = Series([Timedelta(1, unit="d")]) - ser = td.astype(str) - - expected = Series(["1 days"]) - 
tm.assert_series_equal(ser, expected) - - def test_astype_unicode(self): - # see gh-7758: A bit of magic is required to set - # default encoding to utf-8 - digits = string.digits - test_series = [ - Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]), - Series(["データーサイエンス、お前はもう死んでいる"]), - ] - - former_encoding = None - - if sys.getdefaultencoding() == "utf-8": - test_series.append(Series(["野菜食べないとやばい".encode()])) - - for s in test_series: - res = s.astype("unicode") - expec = s.map(str) - tm.assert_series_equal(res, expec) - - # Restore the former encoding - if former_encoding is not None and former_encoding != "utf-8": - reload(sys) - sys.setdefaultencoding(former_encoding) - - @pytest.mark.parametrize("dtype_class", [dict, Series]) - def test_astype_dict_like(self, dtype_class): - # see gh-7271 - s = Series(range(0, 10, 2), name="abc") - - dt1 = dtype_class({"abc": str}) - result = s.astype(dt1) - expected = Series(["0", "2", "4", "6", "8"], name="abc") - tm.assert_series_equal(result, expected) - - dt2 = dtype_class({"abc": "float64"}) - result = s.astype(dt2) - expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc") - tm.assert_series_equal(result, expected) - - dt3 = dtype_class({"abc": str, "def": str}) - msg = ( - "Only the Series name can be used for the key in Series dtype " - r"mappings\." - ) - with pytest.raises(KeyError, match=msg): - s.astype(dt3) - - dt4 = dtype_class({0: str}) - with pytest.raises(KeyError, match=msg): - s.astype(dt4) - - # GH16717 - # if dtypes provided is empty, it should error - if dtype_class is Series: - dt5 = dtype_class({}, dtype=object) - else: - dt5 = dtype_class({}) - - with pytest.raises(KeyError, match=msg): - s.astype(dt5) - - def test_astype_categories_raises(self): - # deprecated 17636, removed in GH-27141 - s = Series(["a", "b", "a"]) - with pytest.raises(TypeError, match="got an unexpected"): - s.astype("category", categories=["a", "b"], ordered=True) - def test_astype_from_categorical(self): items = ["a", "b", "c", "a"] s = Series(items) @@ -325,79 +133,6 @@ def cmp(a, b): with pytest.raises(TypeError, match=msg): invalid(s) - @pytest.mark.parametrize("name", [None, "foo"]) - @pytest.mark.parametrize("dtype_ordered", [True, False]) - @pytest.mark.parametrize("series_ordered", [True, False]) - def test_astype_categorical_to_categorical( - self, name, dtype_ordered, series_ordered - ): - # GH 10696/18593 - s_data = list("abcaacbab") - s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered) - s = Series(s_data, dtype=s_dtype, name=name) - - # unspecified categories - dtype = CategoricalDtype(ordered=dtype_ordered) - result = s.astype(dtype) - exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered) - expected = Series(s_data, name=name, dtype=exp_dtype) - tm.assert_series_equal(result, expected) - - # different categories - dtype = CategoricalDtype(list("adc"), dtype_ordered) - result = s.astype(dtype) - expected = Series(s_data, name=name, dtype=dtype) - tm.assert_series_equal(result, expected) - - if dtype_ordered is False: - # not specifying ordered, so only test once - expected = s - result = s.astype("category") - tm.assert_series_equal(result, expected) - - def test_astype_bool_missing_to_categorical(self): - # GH-19182 - s = Series([True, False, np.nan]) - assert s.dtypes == np.object_ - - result = s.astype(CategoricalDtype(categories=[True, False])) - expected = Series(Categorical([True, False, np.nan], categories=[True, False])) - tm.assert_series_equal(result, expected) - - def 
test_astype_categoricaldtype(self): - s = Series(["a", "b", "a"]) - result = s.astype(CategoricalDtype(["a", "b"], ordered=True)) - expected = Series(Categorical(["a", "b", "a"], ordered=True)) - tm.assert_series_equal(result, expected) - - result = s.astype(CategoricalDtype(["a", "b"], ordered=False)) - expected = Series(Categorical(["a", "b", "a"], ordered=False)) - tm.assert_series_equal(result, expected) - - result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False)) - expected = Series( - Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False) - ) - tm.assert_series_equal(result, expected) - tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"])) - - @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) - def test_astype_generic_timestamp_no_frequency(self, dtype, request): - # see gh-15524, gh-15987 - data = [1] - s = Series(data) - - if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: - mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") - request.node.add_marker(mark) - - msg = ( - fr"The '{dtype.__name__}' dtype has no unit\. " - fr"Please pass in '{dtype.__name__}\[ns\]' instead." - ) - with pytest.raises(ValueError, match=msg): - s.astype(dtype) - @pytest.mark.parametrize("dtype", np.typecodes["All"]) def test_astype_empty_constructor_equality(self, dtype): # see gh-15524 @@ -413,19 +148,6 @@ def test_astype_empty_constructor_equality(self, dtype): as_type_empty = Series([]).astype(dtype) tm.assert_series_equal(init_empty, as_type_empty) - def test_arg_for_errors_in_astype(self): - # see gh-14878 - s = Series([1, 2, 3]) - - msg = ( - r"Expected value of kwarg 'errors' to be one of \['raise', " - r"'ignore'\]\. Supplied value is 'False'" - ) - with pytest.raises(ValueError, match=msg): - s.astype(np.float64, errors=False) - - s.astype(np.int8, errors="raise") - def test_intercept_astype_object(self): series = Series(date_range("1/1/2000", periods=10)) @@ -456,23 +178,6 @@ def test_series_to_categorical(self): tm.assert_series_equal(result, expected) - def test_infer_objects_series(self): - # GH 11221 - actual = Series(np.array([1, 2, 3], dtype="O")).infer_objects() - expected = Series([1, 2, 3]) - tm.assert_series_equal(actual, expected) - - actual = Series(np.array([1, 2, 3, None], dtype="O")).infer_objects() - expected = Series([1.0, 2.0, 3.0, np.nan]) - tm.assert_series_equal(actual, expected) - - # only soft conversions, unconvertable pass thru unchanged - actual = Series(np.array([1, 2, 3, None, "a"], dtype="O")).infer_objects() - expected = Series([1, 2, 3, None, "a"]) - - assert actual.dtype == "object" - tm.assert_series_equal(actual, expected) - @pytest.mark.parametrize( "data", [
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37282
2020-10-20T18:22:43Z
2020-10-21T15:58:53Z
2020-10-21T15:58:53Z
2020-10-21T16:02:09Z
REF/TST: collect fillna tests
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py new file mode 100644 index 0000000000000..9fa1aa65379c5 --- /dev/null +++ b/pandas/tests/frame/methods/test_fillna.py @@ -0,0 +1,526 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.tests.frame.common import _check_mixed_float + + +class TestFillNA: + def test_fillna_datetime(self, datetime_frame): + tf = datetime_frame + tf.loc[tf.index[:5], "A"] = np.nan + tf.loc[tf.index[-5:], "A"] = np.nan + + zero_filled = datetime_frame.fillna(0) + assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all() + + padded = datetime_frame.fillna(method="pad") + assert np.isnan(padded.loc[padded.index[:5], "A"]).all() + assert ( + padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"] + ).all() + + msg = "Must specify a fill 'value' or 'method'" + with pytest.raises(ValueError, match=msg): + datetime_frame.fillna() + msg = "Cannot specify both 'value' and 'method'" + with pytest.raises(ValueError, match=msg): + datetime_frame.fillna(5, method="ffill") + + def test_fillna_mixed_type(self, float_string_frame): + + mf = float_string_frame + mf.loc[mf.index[5:20], "foo"] = np.nan + mf.loc[mf.index[-10:], "A"] = np.nan + # TODO: make stronger assertion here, GH 25640 + mf.fillna(value=0) + mf.fillna(method="pad") + + def test_fillna_mixed_float(self, mixed_float_frame): + + # mixed numeric (but no float16) + mf = mixed_float_frame.reindex(columns=["A", "B", "D"]) + mf.loc[mf.index[-10:], "A"] = np.nan + result = mf.fillna(value=0) + _check_mixed_float(result, dtype=dict(C=None)) + + result = mf.fillna(method="pad") + _check_mixed_float(result, dtype=dict(C=None)) + + def test_fillna_empty(self): + # empty frame (GH#2778) + df = DataFrame(columns=["x"]) + for m in ["pad", "backfill"]: + df.x.fillna(method=m, inplace=True) + df.x.fillna(method=m) + + def test_fillna_different_dtype(self): + # with different dtype (GH#3386) + df = DataFrame( + [["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]] + ) + + result = df.fillna({2: "foo"}) + expected = DataFrame( + [["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]] + ) + tm.assert_frame_equal(result, expected) + + return_value = df.fillna({2: "foo"}, inplace=True) + tm.assert_frame_equal(df, expected) + assert return_value is None + + def test_fillna_limit_and_value(self): + # limit and value + df = DataFrame(np.random.randn(10, 3)) + df.iloc[2:7, 0] = np.nan + df.iloc[3:5, 2] = np.nan + + expected = df.copy() + expected.iloc[2, 0] = 999 + expected.iloc[3, 2] = 999 + result = df.fillna(999, limit=1) + tm.assert_frame_equal(result, expected) + + def test_fillna_datelike(self): + # with datelike + # GH#6344 + df = DataFrame( + { + "Date": [NaT, Timestamp("2014-1-1")], + "Date2": [Timestamp("2013-1-1"), NaT], + } + ) + + expected = df.copy() + expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"]) + result = df.fillna(value={"Date": df["Date2"]}) + tm.assert_frame_equal(result, expected) + + def test_fillna_tzaware(self): + # with timezone + # GH#15855 + df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]}) + exp = DataFrame( + { + "A": [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + } + ) + tm.assert_frame_equal(df.fillna(method="pad"), exp) + + df = 
DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]}) + exp = DataFrame( + { + "A": [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + } + ) + tm.assert_frame_equal(df.fillna(method="bfill"), exp) + + def test_fillna_tzaware_different_column(self): + # with timezone in another column + # GH#15522 + df = DataFrame( + { + "A": date_range("20130101", periods=4, tz="US/Eastern"), + "B": [1, 2, np.nan, np.nan], + } + ) + result = df.fillna(method="pad") + expected = DataFrame( + { + "A": date_range("20130101", periods=4, tz="US/Eastern"), + "B": [1.0, 2.0, 2.0, 2.0], + } + ) + tm.assert_frame_equal(result, expected) + + def test_na_actions_categorical(self): + + cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) + vals = ["a", "b", np.nan, "d"] + df = DataFrame({"cats": cat, "vals": vals}) + cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3]) + vals2 = ["a", "b", "b", "d"] + df_exp_fill = DataFrame({"cats": cat2, "vals": vals2}) + cat3 = Categorical([1, 2, 3], categories=[1, 2, 3]) + vals3 = ["a", "b", np.nan] + df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3}) + cat4 = Categorical([1, 2], categories=[1, 2, 3]) + vals4 = ["a", "b"] + df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4}) + + # fillna + res = df.fillna(value={"cats": 3, "vals": "b"}) + tm.assert_frame_equal(res, df_exp_fill) + + msg = "'fill_value=4' is not present in this Categorical's categories" + with pytest.raises(ValueError, match=msg): + df.fillna(value={"cats": 4, "vals": "c"}) + + res = df.fillna(method="pad") + tm.assert_frame_equal(res, df_exp_fill) + + # dropna + res = df.dropna(subset=["cats"]) + tm.assert_frame_equal(res, df_exp_drop_cats) + + res = df.dropna() + tm.assert_frame_equal(res, df_exp_drop_all) + + # make sure that fillna takes missing values into account + c = Categorical([np.nan, "b", np.nan], categories=["a", "b"]) + df = DataFrame({"cats": c, "vals": [1, 2, 3]}) + + cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"]) + df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]}) + + res = df.fillna("a") + tm.assert_frame_equal(res, df_exp) + + def test_fillna_categorical_nan(self): + # GH#14021 + # np.nan should always be a valid filler + cat = Categorical([np.nan, 2, np.nan]) + val = Categorical([np.nan, np.nan, np.nan]) + df = DataFrame({"cats": cat, "vals": val}) + + # GH#32950 df.median() is poorly behaved because there is no + # Categorical.median + median = Series({"cats": 2.0, "vals": np.nan}) + + res = df.fillna(median) + v_exp = [np.nan, np.nan, np.nan] + df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype="category") + tm.assert_frame_equal(res, df_exp) + + result = df.cats.fillna(np.nan) + tm.assert_series_equal(result, df.cats) + + result = df.vals.fillna(np.nan) + tm.assert_series_equal(result, df.vals) + + idx = DatetimeIndex( + ["2011-01-01 09:00", "2016-01-01 23:45", "2011-01-01 09:00", NaT, NaT] + ) + df = DataFrame({"a": Categorical(idx)}) + tm.assert_frame_equal(df.fillna(value=NaT), df) + + idx = PeriodIndex(["2011-01", "2011-01", "2011-01", NaT, NaT], freq="M") + df = DataFrame({"a": Categorical(idx)}) + tm.assert_frame_equal(df.fillna(value=NaT), df) + + idx = TimedeltaIndex(["1 days", "2 days", "1 days", NaT, NaT]) + df = DataFrame({"a": Categorical(idx)}) + tm.assert_frame_equal(df.fillna(value=NaT), df) + + def test_fillna_downcast(self): + # GH#15277 + # infer int64 from float64 + df = DataFrame({"a": [1.0, np.nan]}) + result = df.fillna(0, downcast="infer") + expected = 
DataFrame({"a": [1, 0]}) + tm.assert_frame_equal(result, expected) + + # infer int64 from float64 when fillna value is a dict + df = DataFrame({"a": [1.0, np.nan]}) + result = df.fillna({"a": 0}, downcast="infer") + expected = DataFrame({"a": [1, 0]}) + tm.assert_frame_equal(result, expected) + + def test_fillna_dtype_conversion(self): + # make sure that fillna on an empty frame works + df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) + result = df.dtypes + expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5]) + tm.assert_series_equal(result, expected) + + result = df.fillna(1) + expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) + tm.assert_frame_equal(result, expected) + + # empty block + df = DataFrame(index=range(3), columns=["A", "B"], dtype="float64") + result = df.fillna("nan") + expected = DataFrame("nan", index=range(3), columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + # equiv of replace + df = DataFrame(dict(A=[1, np.nan], B=[1.0, 2.0])) + for v in ["", 1, np.nan, 1.0]: + expected = df.replace(np.nan, v) + result = df.fillna(v) + tm.assert_frame_equal(result, expected) + + def test_fillna_datetime_columns(self): + # GH#7095 + df = DataFrame( + { + "A": [-1, -2, np.nan], + "B": date_range("20130101", periods=3), + "C": ["foo", "bar", None], + "D": ["foo2", "bar2", None], + }, + index=date_range("20130110", periods=3), + ) + result = df.fillna("?") + expected = DataFrame( + { + "A": [-1, -2, "?"], + "B": date_range("20130101", periods=3), + "C": ["foo", "bar", "?"], + "D": ["foo2", "bar2", "?"], + }, + index=date_range("20130110", periods=3), + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "A": [-1, -2, np.nan], + "B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), NaT], + "C": ["foo", "bar", None], + "D": ["foo2", "bar2", None], + }, + index=date_range("20130110", periods=3), + ) + result = df.fillna("?") + expected = DataFrame( + { + "A": [-1, -2, "?"], + "B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), "?"], + "C": ["foo", "bar", "?"], + "D": ["foo2", "bar2", "?"], + }, + index=date_range("20130110", periods=3), + ) + tm.assert_frame_equal(result, expected) + + def test_ffill(self, datetime_frame): + datetime_frame["A"][:5] = np.nan + datetime_frame["A"][-5:] = np.nan + + tm.assert_frame_equal( + datetime_frame.ffill(), datetime_frame.fillna(method="ffill") + ) + + def test_bfill(self, datetime_frame): + datetime_frame["A"][:5] = np.nan + datetime_frame["A"][-5:] = np.nan + + tm.assert_frame_equal( + datetime_frame.bfill(), datetime_frame.fillna(method="bfill") + ) + + def test_frame_pad_backfill_limit(self): + index = np.arange(10) + df = DataFrame(np.random.randn(10, 4), index=index) + + result = df[:2].reindex(index, method="pad", limit=5) + + expected = df[:2].reindex(index).fillna(method="pad") + expected.values[-3:] = np.nan + tm.assert_frame_equal(result, expected) + + result = df[-2:].reindex(index, method="backfill", limit=5) + + expected = df[-2:].reindex(index).fillna(method="backfill") + expected.values[:3] = np.nan + tm.assert_frame_equal(result, expected) + + def test_frame_fillna_limit(self): + index = np.arange(10) + df = DataFrame(np.random.randn(10, 4), index=index) + + result = df[:2].reindex(index) + result = result.fillna(method="pad", limit=5) + + expected = df[:2].reindex(index).fillna(method="pad") + expected.values[-3:] = np.nan + tm.assert_frame_equal(result, expected) + + result = df[-2:].reindex(index) + result = result.fillna(method="backfill", 
limit=5) + + expected = df[-2:].reindex(index).fillna(method="backfill") + expected.values[:3] = np.nan + tm.assert_frame_equal(result, expected) + + def test_fillna_skip_certain_blocks(self): + # don't try to fill boolean, int blocks + + df = DataFrame(np.random.randn(10, 4).astype(int)) + + # it works! + df.fillna(np.nan) + + @pytest.mark.parametrize("type", [int, float]) + def test_fillna_positive_limit(self, type): + df = DataFrame(np.random.randn(10, 4)).astype(type) + + msg = "Limit must be greater than 0" + with pytest.raises(ValueError, match=msg): + df.fillna(0, limit=-5) + + @pytest.mark.parametrize("type", [int, float]) + def test_fillna_integer_limit(self, type): + df = DataFrame(np.random.randn(10, 4)).astype(type) + + msg = "Limit must be an integer" + with pytest.raises(ValueError, match=msg): + df.fillna(0, limit=0.5) + + def test_fillna_inplace(self): + df = DataFrame(np.random.randn(10, 4)) + df[1][:4] = np.nan + df[3][-4:] = np.nan + + expected = df.fillna(value=0) + assert expected is not df + + df.fillna(value=0, inplace=True) + tm.assert_frame_equal(df, expected) + + expected = df.fillna(value={0: 0}, inplace=True) + assert expected is None + + df[1][:4] = np.nan + df[3][-4:] = np.nan + expected = df.fillna(method="ffill") + assert expected is not df + + df.fillna(method="ffill", inplace=True) + tm.assert_frame_equal(df, expected) + + def test_fillna_dict_series(self): + df = DataFrame( + { + "a": [np.nan, 1, 2, np.nan, np.nan], + "b": [1, 2, 3, np.nan, np.nan], + "c": [np.nan, 1, 2, 3, 4], + } + ) + + result = df.fillna({"a": 0, "b": 5}) + + expected = df.copy() + expected["a"] = expected["a"].fillna(0) + expected["b"] = expected["b"].fillna(5) + tm.assert_frame_equal(result, expected) + + # it works + result = df.fillna({"a": 0, "b": 5, "d": 7}) + + # Series treated same as dict + result = df.fillna(df.max()) + expected = df.fillna(df.max().to_dict()) + tm.assert_frame_equal(result, expected) + + # disable this for now + with pytest.raises(NotImplementedError, match="column by column"): + df.fillna(df.max(1), axis=1) + + def test_fillna_dataframe(self): + # GH#8377 + df = DataFrame( + { + "a": [np.nan, 1, 2, np.nan, np.nan], + "b": [1, 2, 3, np.nan, np.nan], + "c": [np.nan, 1, 2, 3, 4], + }, + index=list("VWXYZ"), + ) + + # df2 may have different index and columns + df2 = DataFrame( + { + "a": [np.nan, 10, 20, 30, 40], + "b": [50, 60, 70, 80, 90], + "foo": ["bar"] * 5, + }, + index=list("VWXuZ"), + ) + + result = df.fillna(df2) + + # only those columns and indices which are shared get filled + expected = DataFrame( + { + "a": [np.nan, 1, 2, np.nan, 40], + "b": [1, 2, 3, np.nan, 90], + "c": [np.nan, 1, 2, 3, 4], + }, + index=list("VWXYZ"), + ) + + tm.assert_frame_equal(result, expected) + + def test_fillna_columns(self): + df = DataFrame(np.random.randn(10, 10)) + df.values[:, ::2] = np.nan + + result = df.fillna(method="ffill", axis=1) + expected = df.T.fillna(method="pad").T + tm.assert_frame_equal(result, expected) + + df.insert(6, "foo", 5) + result = df.fillna(method="ffill", axis=1) + expected = df.astype(float).fillna(method="ffill", axis=1) + tm.assert_frame_equal(result, expected) + + def test_fillna_invalid_method(self, float_frame): + with pytest.raises(ValueError, match="ffil"): + float_frame.fillna(method="ffil") + + def test_fillna_invalid_value(self, float_frame): + # list + msg = '"value" parameter must be a scalar or dict, but you passed a "{}"' + with pytest.raises(TypeError, match=msg.format("list")): + float_frame.fillna([1, 2]) + # tuple + with 
pytest.raises(TypeError, match=msg.format("tuple")): + float_frame.fillna((1, 2)) + # frame with series + msg = ( + '"value" parameter must be a scalar, dict or Series, but you ' + 'passed a "DataFrame"' + ) + with pytest.raises(TypeError, match=msg): + float_frame.iloc[:, 0].fillna(float_frame) + + def test_fillna_col_reordering(self): + cols = ["COL." + str(i) for i in range(5, 0, -1)] + data = np.random.rand(20, 5) + df = DataFrame(index=range(20), columns=cols, data=data) + filled = df.fillna(method="ffill") + assert df.columns.tolist() == filled.columns.tolist() + + def test_fill_corner(self, float_frame, float_string_frame): + mf = float_string_frame + mf.loc[mf.index[5:20], "foo"] = np.nan + mf.loc[mf.index[-10:], "A"] = np.nan + + filled = float_string_frame.fillna(value=0) + assert (filled.loc[filled.index[5:20], "foo"] == 0).all() + del float_string_frame["foo"] + + empty_float = float_frame.reindex(columns=[]) + + # TODO(wesm): unused? + result = empty_float.fillna(value=0) # noqa diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 4b33fd7832cb8..9cbfee5e663ae 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -5,9 +5,8 @@ import pytest import pandas as pd -from pandas import Categorical, DataFrame, Series, Timestamp, date_range +from pandas import DataFrame, Series import pandas._testing as tm -from pandas.tests.frame.common import _check_mixed_float class TestDataFrameMissingData: @@ -208,513 +207,3 @@ def test_dropna_categorical_interval_index(self): expected = df result = df.dropna() tm.assert_frame_equal(result, expected) - - def test_fillna_datetime(self, datetime_frame): - tf = datetime_frame - tf.loc[tf.index[:5], "A"] = np.nan - tf.loc[tf.index[-5:], "A"] = np.nan - - zero_filled = datetime_frame.fillna(0) - assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all() - - padded = datetime_frame.fillna(method="pad") - assert np.isnan(padded.loc[padded.index[:5], "A"]).all() - assert ( - padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"] - ).all() - - msg = "Must specify a fill 'value' or 'method'" - with pytest.raises(ValueError, match=msg): - datetime_frame.fillna() - msg = "Cannot specify both 'value' and 'method'" - with pytest.raises(ValueError, match=msg): - datetime_frame.fillna(5, method="ffill") - - def test_fillna_mixed_type(self, float_string_frame): - - mf = float_string_frame - mf.loc[mf.index[5:20], "foo"] = np.nan - mf.loc[mf.index[-10:], "A"] = np.nan - # TODO: make stronger assertion here, GH 25640 - mf.fillna(value=0) - mf.fillna(method="pad") - - def test_fillna_mixed_float(self, mixed_float_frame): - - # mixed numeric (but no float16) - mf = mixed_float_frame.reindex(columns=["A", "B", "D"]) - mf.loc[mf.index[-10:], "A"] = np.nan - result = mf.fillna(value=0) - _check_mixed_float(result, dtype=dict(C=None)) - - result = mf.fillna(method="pad") - _check_mixed_float(result, dtype=dict(C=None)) - - def test_fillna_empty(self): - # empty frame (GH #2778) - df = DataFrame(columns=["x"]) - for m in ["pad", "backfill"]: - df.x.fillna(method=m, inplace=True) - df.x.fillna(method=m) - - def test_fillna_different_dtype(self): - # with different dtype (GH#3386) - df = DataFrame( - [["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]] - ) - - result = df.fillna({2: "foo"}) - expected = DataFrame( - [["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]] - ) - tm.assert_frame_equal(result, expected) - - return_value = 
df.fillna({2: "foo"}, inplace=True) - tm.assert_frame_equal(df, expected) - assert return_value is None - - def test_fillna_limit_and_value(self): - # limit and value - df = DataFrame(np.random.randn(10, 3)) - df.iloc[2:7, 0] = np.nan - df.iloc[3:5, 2] = np.nan - - expected = df.copy() - expected.iloc[2, 0] = 999 - expected.iloc[3, 2] = 999 - result = df.fillna(999, limit=1) - tm.assert_frame_equal(result, expected) - - def test_fillna_datelike(self): - # with datelike - # GH#6344 - df = DataFrame( - { - "Date": [pd.NaT, Timestamp("2014-1-1")], - "Date2": [Timestamp("2013-1-1"), pd.NaT], - } - ) - - expected = df.copy() - expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"]) - result = df.fillna(value={"Date": df["Date2"]}) - tm.assert_frame_equal(result, expected) - - def test_fillna_tzaware(self): - # with timezone - # GH#15855 - df = DataFrame({"A": [pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT]}) - exp = DataFrame( - { - "A": [ - pd.Timestamp("2012-11-11 00:00:00+01:00"), - pd.Timestamp("2012-11-11 00:00:00+01:00"), - ] - } - ) - tm.assert_frame_equal(df.fillna(method="pad"), exp) - - df = DataFrame({"A": [pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")]}) - exp = DataFrame( - { - "A": [ - pd.Timestamp("2012-11-11 00:00:00+01:00"), - pd.Timestamp("2012-11-11 00:00:00+01:00"), - ] - } - ) - tm.assert_frame_equal(df.fillna(method="bfill"), exp) - - def test_fillna_tzaware_different_column(self): - # with timezone in another column - # GH#15522 - df = DataFrame( - { - "A": pd.date_range("20130101", periods=4, tz="US/Eastern"), - "B": [1, 2, np.nan, np.nan], - } - ) - result = df.fillna(method="pad") - expected = DataFrame( - { - "A": pd.date_range("20130101", periods=4, tz="US/Eastern"), - "B": [1.0, 2.0, 2.0, 2.0], - } - ) - tm.assert_frame_equal(result, expected) - - def test_na_actions_categorical(self): - - cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) - vals = ["a", "b", np.nan, "d"] - df = DataFrame({"cats": cat, "vals": vals}) - cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3]) - vals2 = ["a", "b", "b", "d"] - df_exp_fill = DataFrame({"cats": cat2, "vals": vals2}) - cat3 = Categorical([1, 2, 3], categories=[1, 2, 3]) - vals3 = ["a", "b", np.nan] - df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3}) - cat4 = Categorical([1, 2], categories=[1, 2, 3]) - vals4 = ["a", "b"] - df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4}) - - # fillna - res = df.fillna(value={"cats": 3, "vals": "b"}) - tm.assert_frame_equal(res, df_exp_fill) - - msg = "'fill_value=4' is not present in this Categorical's categories" - with pytest.raises(ValueError, match=msg): - df.fillna(value={"cats": 4, "vals": "c"}) - - res = df.fillna(method="pad") - tm.assert_frame_equal(res, df_exp_fill) - - # dropna - res = df.dropna(subset=["cats"]) - tm.assert_frame_equal(res, df_exp_drop_cats) - - res = df.dropna() - tm.assert_frame_equal(res, df_exp_drop_all) - - # make sure that fillna takes missing values into account - c = Categorical([np.nan, "b", np.nan], categories=["a", "b"]) - df = DataFrame({"cats": c, "vals": [1, 2, 3]}) - - cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"]) - df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]}) - - res = df.fillna("a") - tm.assert_frame_equal(res, df_exp) - - def test_fillna_categorical_nan(self): - # GH 14021 - # np.nan should always be a valid filler - cat = Categorical([np.nan, 2, np.nan]) - val = Categorical([np.nan, np.nan, np.nan]) - df = DataFrame({"cats": cat, "vals": val}) - - # GH#32950 
df.median() is poorly behaved because there is no - # Categorical.median - median = Series({"cats": 2.0, "vals": np.nan}) - - res = df.fillna(median) - v_exp = [np.nan, np.nan, np.nan] - df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype="category") - tm.assert_frame_equal(res, df_exp) - - result = df.cats.fillna(np.nan) - tm.assert_series_equal(result, df.cats) - - result = df.vals.fillna(np.nan) - tm.assert_series_equal(result, df.vals) - - idx = pd.DatetimeIndex( - ["2011-01-01 09:00", "2016-01-01 23:45", "2011-01-01 09:00", pd.NaT, pd.NaT] - ) - df = DataFrame({"a": Categorical(idx)}) - tm.assert_frame_equal(df.fillna(value=pd.NaT), df) - - idx = pd.PeriodIndex( - ["2011-01", "2011-01", "2011-01", pd.NaT, pd.NaT], freq="M" - ) - df = DataFrame({"a": Categorical(idx)}) - tm.assert_frame_equal(df.fillna(value=pd.NaT), df) - - idx = pd.TimedeltaIndex(["1 days", "2 days", "1 days", pd.NaT, pd.NaT]) - df = DataFrame({"a": Categorical(idx)}) - tm.assert_frame_equal(df.fillna(value=pd.NaT), df) - - def test_fillna_downcast(self): - # GH 15277 - # infer int64 from float64 - df = DataFrame({"a": [1.0, np.nan]}) - result = df.fillna(0, downcast="infer") - expected = DataFrame({"a": [1, 0]}) - tm.assert_frame_equal(result, expected) - - # infer int64 from float64 when fillna value is a dict - df = DataFrame({"a": [1.0, np.nan]}) - result = df.fillna({"a": 0}, downcast="infer") - expected = DataFrame({"a": [1, 0]}) - tm.assert_frame_equal(result, expected) - - def test_fillna_dtype_conversion(self): - # make sure that fillna on an empty frame works - df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) - result = df.dtypes - expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5]) - tm.assert_series_equal(result, expected) - - result = df.fillna(1) - expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) - tm.assert_frame_equal(result, expected) - - # empty block - df = DataFrame(index=range(3), columns=["A", "B"], dtype="float64") - result = df.fillna("nan") - expected = DataFrame("nan", index=range(3), columns=["A", "B"]) - tm.assert_frame_equal(result, expected) - - # equiv of replace - df = DataFrame(dict(A=[1, np.nan], B=[1.0, 2.0])) - for v in ["", 1, np.nan, 1.0]: - expected = df.replace(np.nan, v) - result = df.fillna(v) - tm.assert_frame_equal(result, expected) - - def test_fillna_datetime_columns(self): - # GH 7095 - df = DataFrame( - { - "A": [-1, -2, np.nan], - "B": date_range("20130101", periods=3), - "C": ["foo", "bar", None], - "D": ["foo2", "bar2", None], - }, - index=date_range("20130110", periods=3), - ) - result = df.fillna("?") - expected = DataFrame( - { - "A": [-1, -2, "?"], - "B": date_range("20130101", periods=3), - "C": ["foo", "bar", "?"], - "D": ["foo2", "bar2", "?"], - }, - index=date_range("20130110", periods=3), - ) - tm.assert_frame_equal(result, expected) - - df = DataFrame( - { - "A": [-1, -2, np.nan], - "B": [pd.Timestamp("2013-01-01"), pd.Timestamp("2013-01-02"), pd.NaT], - "C": ["foo", "bar", None], - "D": ["foo2", "bar2", None], - }, - index=date_range("20130110", periods=3), - ) - result = df.fillna("?") - expected = DataFrame( - { - "A": [-1, -2, "?"], - "B": [pd.Timestamp("2013-01-01"), pd.Timestamp("2013-01-02"), "?"], - "C": ["foo", "bar", "?"], - "D": ["foo2", "bar2", "?"], - }, - index=pd.date_range("20130110", periods=3), - ) - tm.assert_frame_equal(result, expected) - - def test_ffill(self, datetime_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan - - tm.assert_frame_equal( - 
datetime_frame.ffill(), datetime_frame.fillna(method="ffill") - ) - - def test_bfill(self, datetime_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan - - tm.assert_frame_equal( - datetime_frame.bfill(), datetime_frame.fillna(method="bfill") - ) - - def test_frame_pad_backfill_limit(self): - index = np.arange(10) - df = DataFrame(np.random.randn(10, 4), index=index) - - result = df[:2].reindex(index, method="pad", limit=5) - - expected = df[:2].reindex(index).fillna(method="pad") - expected.values[-3:] = np.nan - tm.assert_frame_equal(result, expected) - - result = df[-2:].reindex(index, method="backfill", limit=5) - - expected = df[-2:].reindex(index).fillna(method="backfill") - expected.values[:3] = np.nan - tm.assert_frame_equal(result, expected) - - def test_frame_fillna_limit(self): - index = np.arange(10) - df = DataFrame(np.random.randn(10, 4), index=index) - - result = df[:2].reindex(index) - result = result.fillna(method="pad", limit=5) - - expected = df[:2].reindex(index).fillna(method="pad") - expected.values[-3:] = np.nan - tm.assert_frame_equal(result, expected) - - result = df[-2:].reindex(index) - result = result.fillna(method="backfill", limit=5) - - expected = df[-2:].reindex(index).fillna(method="backfill") - expected.values[:3] = np.nan - tm.assert_frame_equal(result, expected) - - def test_fillna_skip_certain_blocks(self): - # don't try to fill boolean, int blocks - - df = DataFrame(np.random.randn(10, 4).astype(int)) - - # it works! - df.fillna(np.nan) - - @pytest.mark.parametrize("type", [int, float]) - def test_fillna_positive_limit(self, type): - df = DataFrame(np.random.randn(10, 4)).astype(type) - - msg = "Limit must be greater than 0" - with pytest.raises(ValueError, match=msg): - df.fillna(0, limit=-5) - - @pytest.mark.parametrize("type", [int, float]) - def test_fillna_integer_limit(self, type): - df = DataFrame(np.random.randn(10, 4)).astype(type) - - msg = "Limit must be an integer" - with pytest.raises(ValueError, match=msg): - df.fillna(0, limit=0.5) - - def test_fillna_inplace(self): - df = DataFrame(np.random.randn(10, 4)) - df[1][:4] = np.nan - df[3][-4:] = np.nan - - expected = df.fillna(value=0) - assert expected is not df - - df.fillna(value=0, inplace=True) - tm.assert_frame_equal(df, expected) - - expected = df.fillna(value={0: 0}, inplace=True) - assert expected is None - - df[1][:4] = np.nan - df[3][-4:] = np.nan - expected = df.fillna(method="ffill") - assert expected is not df - - df.fillna(method="ffill", inplace=True) - tm.assert_frame_equal(df, expected) - - def test_fillna_dict_series(self): - df = DataFrame( - { - "a": [np.nan, 1, 2, np.nan, np.nan], - "b": [1, 2, 3, np.nan, np.nan], - "c": [np.nan, 1, 2, 3, 4], - } - ) - - result = df.fillna({"a": 0, "b": 5}) - - expected = df.copy() - expected["a"] = expected["a"].fillna(0) - expected["b"] = expected["b"].fillna(5) - tm.assert_frame_equal(result, expected) - - # it works - result = df.fillna({"a": 0, "b": 5, "d": 7}) - - # Series treated same as dict - result = df.fillna(df.max()) - expected = df.fillna(df.max().to_dict()) - tm.assert_frame_equal(result, expected) - - # disable this for now - with pytest.raises(NotImplementedError, match="column by column"): - df.fillna(df.max(1), axis=1) - - def test_fillna_dataframe(self): - # GH 8377 - df = DataFrame( - { - "a": [np.nan, 1, 2, np.nan, np.nan], - "b": [1, 2, 3, np.nan, np.nan], - "c": [np.nan, 1, 2, 3, 4], - }, - index=list("VWXYZ"), - ) - - # df2 may have different index and columns - df2 = DataFrame( - { - 
"a": [np.nan, 10, 20, 30, 40], - "b": [50, 60, 70, 80, 90], - "foo": ["bar"] * 5, - }, - index=list("VWXuZ"), - ) - - result = df.fillna(df2) - - # only those columns and indices which are shared get filled - expected = DataFrame( - { - "a": [np.nan, 1, 2, np.nan, 40], - "b": [1, 2, 3, np.nan, 90], - "c": [np.nan, 1, 2, 3, 4], - }, - index=list("VWXYZ"), - ) - - tm.assert_frame_equal(result, expected) - - def test_fillna_columns(self): - df = DataFrame(np.random.randn(10, 10)) - df.values[:, ::2] = np.nan - - result = df.fillna(method="ffill", axis=1) - expected = df.T.fillna(method="pad").T - tm.assert_frame_equal(result, expected) - - df.insert(6, "foo", 5) - result = df.fillna(method="ffill", axis=1) - expected = df.astype(float).fillna(method="ffill", axis=1) - tm.assert_frame_equal(result, expected) - - def test_fillna_invalid_method(self, float_frame): - with pytest.raises(ValueError, match="ffil"): - float_frame.fillna(method="ffil") - - def test_fillna_invalid_value(self, float_frame): - # list - msg = '"value" parameter must be a scalar or dict, but you passed a "{}"' - with pytest.raises(TypeError, match=msg.format("list")): - float_frame.fillna([1, 2]) - # tuple - with pytest.raises(TypeError, match=msg.format("tuple")): - float_frame.fillna((1, 2)) - # frame with series - msg = ( - '"value" parameter must be a scalar, dict or Series, but you ' - 'passed a "DataFrame"' - ) - with pytest.raises(TypeError, match=msg): - float_frame.iloc[:, 0].fillna(float_frame) - - def test_fillna_col_reordering(self): - cols = ["COL." + str(i) for i in range(5, 0, -1)] - data = np.random.rand(20, 5) - df = DataFrame(index=range(20), columns=cols, data=data) - filled = df.fillna(method="ffill") - assert df.columns.tolist() == filled.columns.tolist() - - def test_fill_corner(self, float_frame, float_string_frame): - mf = float_string_frame - mf.loc[mf.index[5:20], "foo"] = np.nan - mf.loc[mf.index[-10:], "A"] = np.nan - - filled = float_string_frame.fillna(value=0) - assert (filled.loc[filled.index[5:20], "foo"] == 0).all() - del float_string_frame["foo"] - - empty_float = float_frame.reindex(columns=[]) - - # TODO(wesm): unused? 
- result = empty_float.fillna(value=0) # noqa diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index b6a6f4e8200d4..02214d66347e1 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -1,13 +1,504 @@ -from datetime import timedelta +from datetime import datetime, timedelta import numpy as np import pytest +import pytz -from pandas import Categorical, DataFrame, NaT, Period, Series, Timedelta, Timestamp +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + NaT, + Period, + Series, + Timedelta, + Timestamp, + isna, +) import pandas._testing as tm class TestSeriesFillNA: + def test_fillna(self, datetime_series): + ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5)) + + tm.assert_series_equal(ts, ts.fillna(method="ffill")) + + ts[2] = np.NaN + + exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index) + tm.assert_series_equal(ts.fillna(method="ffill"), exp) + + exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index) + tm.assert_series_equal(ts.fillna(method="backfill"), exp) + + exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index) + tm.assert_series_equal(ts.fillna(value=5), exp) + + msg = "Must specify a fill 'value' or 'method'" + with pytest.raises(ValueError, match=msg): + ts.fillna() + + msg = "Cannot specify both 'value' and 'method'" + with pytest.raises(ValueError, match=msg): + datetime_series.fillna(value=0, method="ffill") + + # GH#5703 + s1 = Series([np.nan]) + s2 = Series([1]) + result = s1.fillna(s2) + expected = Series([1.0]) + tm.assert_series_equal(result, expected) + result = s1.fillna({}) + tm.assert_series_equal(result, s1) + result = s1.fillna(Series((), dtype=object)) + tm.assert_series_equal(result, s1) + result = s2.fillna(s1) + tm.assert_series_equal(result, s2) + result = s1.fillna({0: 1}) + tm.assert_series_equal(result, expected) + result = s1.fillna({1: 1}) + tm.assert_series_equal(result, Series([np.nan])) + result = s1.fillna({0: 1, 1: 1}) + tm.assert_series_equal(result, expected) + result = s1.fillna(Series({0: 1, 1: 1})) + tm.assert_series_equal(result, expected) + result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5])) + tm.assert_series_equal(result, s1) + + s1 = Series([0, 1, 2], list("abc")) + s2 = Series([0, np.nan, 2], list("bac")) + result = s2.fillna(s1) + expected = Series([0, 0, 2.0], list("bac")) + tm.assert_series_equal(result, expected) + + # limit + ser = Series(np.nan, index=[0, 1, 2]) + result = ser.fillna(999, limit=1) + expected = Series([999, np.nan, np.nan], index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + result = ser.fillna(999, limit=2) + expected = Series([999, 999, np.nan], index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + # GH#9043 + # make sure a string representation of int/float values can be filled + # correctly without raising errors or being converted + vals = ["0", "1.5", "-0.3"] + for val in vals: + ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64") + result = ser.fillna(val) + expected = Series([0, 1, val, val, 4], dtype="object") + tm.assert_series_equal(result, expected) + + def test_fillna_consistency(self): + # GH#16402 + # fillna with a tz aware to a tz-naive, should result in object + + ser = Series([Timestamp("20130101"), NaT]) + + result = ser.fillna(Timestamp("20130101", tz="US/Eastern")) + expected = Series( + [Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")], + dtype="object", + ) + tm.assert_series_equal(result, expected) + + # 
where (we ignore the errors=) + result = ser.where( + [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" + ) + tm.assert_series_equal(result, expected) + + result = ser.where( + [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" + ) + tm.assert_series_equal(result, expected) + + # with a non-datetime + result = ser.fillna("foo") + expected = Series([Timestamp("20130101"), "foo"]) + tm.assert_series_equal(result, expected) + + # assignment + ser2 = ser.copy() + ser2[1] = "foo" + tm.assert_series_equal(ser2, expected) + + def test_fillna_downcast(self): + # GH#15277 + # infer int64 from float64 + ser = Series([1.0, np.nan]) + result = ser.fillna(0, downcast="infer") + expected = Series([1, 0]) + tm.assert_series_equal(result, expected) + + # infer int64 from float64 when fillna value is a dict + ser = Series([1.0, np.nan]) + result = ser.fillna({1: 0}, downcast="infer") + expected = Series([1, 0]) + tm.assert_series_equal(result, expected) + + def test_timedelta_fillna(self): + # GH#3371 + ser = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130102"), + Timestamp("20130103 9:01:01"), + ] + ) + td = ser.diff() + + # reg fillna + result = td.fillna(Timedelta(seconds=0)) + expected = Series( + [ + timedelta(0), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ] + ) + tm.assert_series_equal(result, expected) + + # interpreted as seconds, deprecated + with pytest.raises(TypeError, match="Passing integers to fillna"): + td.fillna(1) + + result = td.fillna(Timedelta(seconds=1)) + expected = Series( + [ + timedelta(seconds=1), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ] + ) + tm.assert_series_equal(result, expected) + + result = td.fillna(timedelta(days=1, seconds=1)) + expected = Series( + [ + timedelta(days=1, seconds=1), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ] + ) + tm.assert_series_equal(result, expected) + + result = td.fillna(np.timedelta64(int(1e9))) + expected = Series( + [ + timedelta(seconds=1), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ] + ) + tm.assert_series_equal(result, expected) + + result = td.fillna(NaT) + expected = Series( + [ + NaT, + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ], + dtype="m8[ns]", + ) + tm.assert_series_equal(result, expected) + + # ffill + td[2] = np.nan + result = td.ffill() + expected = td.fillna(Timedelta(seconds=0)) + expected[0] = np.nan + tm.assert_series_equal(result, expected) + + # bfill + td[2] = np.nan + result = td.bfill() + expected = td.fillna(Timedelta(seconds=0)) + expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1) + tm.assert_series_equal(result, expected) + + def test_datetime64_fillna(self): + + ser = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130102"), + Timestamp("20130103 9:01:01"), + ] + ) + ser[2] = np.nan + + # ffill + result = ser.ffill() + expected = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130103 9:01:01"), + ] + ) + tm.assert_series_equal(result, expected) + + # bfill + result = ser.bfill() + expected = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130103 9:01:01"), + Timestamp("20130103 9:01:01"), + ] + ) + tm.assert_series_equal(result, expected) + + # GH#6587 + # make sure that we are treating as integer when filling + # this also tests 
inference of a datetime-like with NaT's + ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"]) + expected = Series( + [ + "2013-08-05 15:30:00.000001", + "2013-08-05 15:30:00.000001", + "2013-08-05 15:30:00.000001", + ], + dtype="M8[ns]", + ) + result = ser.fillna(method="backfill") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"]) + def test_datetime64_tz_fillna(self, tz): + # DatetimeBlock + ser = Series( + [ + Timestamp("2011-01-01 10:00"), + NaT, + Timestamp("2011-01-03 10:00"), + NaT, + ] + ) + null_loc = Series([False, True, False, True]) + + result = ser.fillna(Timestamp("2011-01-02 10:00")) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00"), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-02 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + # check s is not changed + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz)) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-02 10:00", tz=tz), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna("AAA") + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + "AAA", + Timestamp("2011-01-03 10:00"), + "AAA", + ], + dtype=object, + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + { + 1: Timestamp("2011-01-02 10:00", tz=tz), + 3: Timestamp("2011-01-04 10:00"), + } + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-04 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + {1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")} + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00"), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-04 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + # DatetimeBlockTZ + idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz) + ser = Series(idx) + assert ser.dtype == f"datetime64[ns, {tz}]" + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00")) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2011-01-02 10:00"), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2011-01-02 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz)) + idx = DatetimeIndex( + [ + "2011-01-01 10:00", + "2011-01-02 10:00", + "2011-01-03 10:00", + "2011-01-02 10:00", + ], + tz=tz, + ) + expected = Series(idx) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime()) + idx = DatetimeIndex( + [ + "2011-01-01 10:00", + "2011-01-02 10:00", + "2011-01-03 10:00", + "2011-01-02 10:00", + ], + tz=tz, + ) + expected = Series(idx) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna("AAA") + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + "AAA", + 
Timestamp("2011-01-03 10:00", tz=tz), + "AAA", + ], + dtype=object, + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + { + 1: Timestamp("2011-01-02 10:00", tz=tz), + 3: Timestamp("2011-01-04 10:00"), + } + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2011-01-04 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + { + 1: Timestamp("2011-01-02 10:00", tz=tz), + 3: Timestamp("2011-01-04 10:00", tz=tz), + } + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2011-01-04 10:00", tz=tz), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + # filling with a naive/other zone, coerce to object + result = ser.fillna(Timestamp("20130101")) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2013-01-01"), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2013-01-01"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("20130101", tz="US/Pacific")) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2013-01-01", tz="US/Pacific"), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2013-01-01", tz="US/Pacific"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + def test_fillna_dt64tz_with_method(self): + # with timezone + # GH#15855 + ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT]) + exp = Series( + [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + ) + tm.assert_series_equal(ser.fillna(method="pad"), exp) + + ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")]) + exp = Series( + [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + ) + tm.assert_series_equal(ser.fillna(method="bfill"), exp) + def test_fillna_pytimedelta(self): # GH#8209 ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"]) @@ -153,6 +644,12 @@ def test_fillna_categorical_raises(self): # --------------------------------------------------------------- # Invalid Usages + def test_fillna_invalid_method(self, datetime_series): + try: + datetime_series.fillna(method="ffil") + except ValueError as inst: + assert "ffil" in str(inst) + def test_fillna_listlike_invalid(self): ser = Series(np.random.randint(-100, 100, 50)) msg = '"value" parameter must be a scalar or dict, but you passed a "list"' @@ -176,3 +673,104 @@ def test_fillna_method_and_limit_invalid(self): for method in ["backfill", "bfill", "pad", "ffill", None]: with pytest.raises(ValueError, match=msg): ser.fillna(1, limit=limit, method=method) + + +class TestFillnaPad: + def test_fillna_bug(self): + ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"]) + filled = ser.fillna(method="ffill") + expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index) + tm.assert_series_equal(filled, expected) + + filled = ser.fillna(method="bfill") + expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index) + tm.assert_series_equal(filled, expected) + + def test_ffill(self): + ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5)) + ts[2] = np.NaN + 
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill")) + + def test_ffill_mixed_dtypes_without_missing_data(self): + # GH#14956 + series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1]) + result = series.ffill() + tm.assert_series_equal(series, result) + + def test_bfill(self): + ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5)) + ts[2] = np.NaN + tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill")) + + def test_pad_nan(self): + x = Series( + [np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float + ) + + return_value = x.fillna(method="pad", inplace=True) + assert return_value is None + + expected = Series( + [np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float + ) + tm.assert_series_equal(x[1:], expected[1:]) + assert np.isnan(x[0]), np.isnan(expected[0]) + + def test_series_fillna_limit(self): + index = np.arange(10) + s = Series(np.random.randn(10), index=index) + + result = s[:2].reindex(index) + result = result.fillna(method="pad", limit=5) + + expected = s[:2].reindex(index).fillna(method="pad") + expected[-3:] = np.nan + tm.assert_series_equal(result, expected) + + result = s[-2:].reindex(index) + result = result.fillna(method="bfill", limit=5) + + expected = s[-2:].reindex(index).fillna(method="backfill") + expected[:3] = np.nan + tm.assert_series_equal(result, expected) + + def test_series_pad_backfill_limit(self): + index = np.arange(10) + s = Series(np.random.randn(10), index=index) + + result = s[:2].reindex(index, method="pad", limit=5) + + expected = s[:2].reindex(index).fillna(method="pad") + expected[-3:] = np.nan + tm.assert_series_equal(result, expected) + + result = s[-2:].reindex(index, method="backfill", limit=5) + + expected = s[-2:].reindex(index).fillna(method="backfill") + expected[:3] = np.nan + tm.assert_series_equal(result, expected) + + def test_fillna_int(self): + ser = Series(np.random.randint(-100, 100, 50)) + return_value = ser.fillna(method="ffill", inplace=True) + assert return_value is None + tm.assert_series_equal(ser.fillna(method="ffill", inplace=False), ser) + + def test_datetime64tz_fillna_round_issue(self): + # GH#14872 + + data = Series( + [NaT, NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)] + ) + + filled = data.fillna(method="bfill") + + expected = Series( + [ + datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), + datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), + datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), + ] + ) + + tm.assert_series_equal(filled, expected) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 712921f70e46f..6d8d0703acdb5 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1,8 +1,7 @@ -from datetime import datetime, timedelta +from datetime import timedelta import numpy as np import pytest -import pytz from pandas._libs import iNaT @@ -14,7 +13,6 @@ IntervalIndex, NaT, Series, - Timedelta, Timestamp, date_range, isna, @@ -23,440 +21,6 @@ class TestSeriesMissingData: - def test_timedelta_fillna(self): - # GH 3371 - s = Series( - [ - Timestamp("20130101"), - Timestamp("20130101"), - Timestamp("20130102"), - Timestamp("20130103 9:01:01"), - ] - ) - td = s.diff() - - # reg fillna - result = td.fillna(Timedelta(seconds=0)) - expected = Series( - [ - timedelta(0), - timedelta(0), - timedelta(1), - timedelta(days=1, seconds=9 * 3600 + 60 + 1), - ] - ) - tm.assert_series_equal(result, expected) - - # interpreted as seconds, 
deprecated - with pytest.raises(TypeError, match="Passing integers to fillna"): - td.fillna(1) - - result = td.fillna(Timedelta(seconds=1)) - expected = Series( - [ - timedelta(seconds=1), - timedelta(0), - timedelta(1), - timedelta(days=1, seconds=9 * 3600 + 60 + 1), - ] - ) - tm.assert_series_equal(result, expected) - - result = td.fillna(timedelta(days=1, seconds=1)) - expected = Series( - [ - timedelta(days=1, seconds=1), - timedelta(0), - timedelta(1), - timedelta(days=1, seconds=9 * 3600 + 60 + 1), - ] - ) - tm.assert_series_equal(result, expected) - - result = td.fillna(np.timedelta64(int(1e9))) - expected = Series( - [ - timedelta(seconds=1), - timedelta(0), - timedelta(1), - timedelta(days=1, seconds=9 * 3600 + 60 + 1), - ] - ) - tm.assert_series_equal(result, expected) - - result = td.fillna(NaT) - expected = Series( - [ - NaT, - timedelta(0), - timedelta(1), - timedelta(days=1, seconds=9 * 3600 + 60 + 1), - ], - dtype="m8[ns]", - ) - tm.assert_series_equal(result, expected) - - # ffill - td[2] = np.nan - result = td.ffill() - expected = td.fillna(Timedelta(seconds=0)) - expected[0] = np.nan - tm.assert_series_equal(result, expected) - - # bfill - td[2] = np.nan - result = td.bfill() - expected = td.fillna(Timedelta(seconds=0)) - expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1) - tm.assert_series_equal(result, expected) - - def test_datetime64_fillna(self): - - s = Series( - [ - Timestamp("20130101"), - Timestamp("20130101"), - Timestamp("20130102"), - Timestamp("20130103 9:01:01"), - ] - ) - s[2] = np.nan - - # ffill - result = s.ffill() - expected = Series( - [ - Timestamp("20130101"), - Timestamp("20130101"), - Timestamp("20130101"), - Timestamp("20130103 9:01:01"), - ] - ) - tm.assert_series_equal(result, expected) - - # bfill - result = s.bfill() - expected = Series( - [ - Timestamp("20130101"), - Timestamp("20130101"), - Timestamp("20130103 9:01:01"), - Timestamp("20130103 9:01:01"), - ] - ) - tm.assert_series_equal(result, expected) - - # GH 6587 - # make sure that we are treating as integer when filling - # this also tests inference of a datetime-like with NaT's - s = Series([pd.NaT, pd.NaT, "2013-08-05 15:30:00.000001"]) - expected = Series( - [ - "2013-08-05 15:30:00.000001", - "2013-08-05 15:30:00.000001", - "2013-08-05 15:30:00.000001", - ], - dtype="M8[ns]", - ) - result = s.fillna(method="backfill") - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"]) - def test_datetime64_tz_fillna(self, tz): - # DatetimeBlock - s = Series( - [ - Timestamp("2011-01-01 10:00"), - pd.NaT, - Timestamp("2011-01-03 10:00"), - pd.NaT, - ] - ) - null_loc = Series([False, True, False, True]) - - result = s.fillna(pd.Timestamp("2011-01-02 10:00")) - expected = Series( - [ - Timestamp("2011-01-01 10:00"), - Timestamp("2011-01-02 10:00"), - Timestamp("2011-01-03 10:00"), - Timestamp("2011-01-02 10:00"), - ] - ) - tm.assert_series_equal(expected, result) - # check s is not changed - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz)) - expected = Series( - [ - Timestamp("2011-01-01 10:00"), - Timestamp("2011-01-02 10:00", tz=tz), - Timestamp("2011-01-03 10:00"), - Timestamp("2011-01-02 10:00", tz=tz), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna("AAA") - expected = Series( - [ - Timestamp("2011-01-01 10:00"), - "AAA", - Timestamp("2011-01-03 10:00"), - "AAA", - ], - dtype=object, - ) - 
tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna( - { - 1: pd.Timestamp("2011-01-02 10:00", tz=tz), - 3: pd.Timestamp("2011-01-04 10:00"), - } - ) - expected = Series( - [ - Timestamp("2011-01-01 10:00"), - Timestamp("2011-01-02 10:00", tz=tz), - Timestamp("2011-01-03 10:00"), - Timestamp("2011-01-04 10:00"), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna( - {1: pd.Timestamp("2011-01-02 10:00"), 3: pd.Timestamp("2011-01-04 10:00")} - ) - expected = Series( - [ - Timestamp("2011-01-01 10:00"), - Timestamp("2011-01-02 10:00"), - Timestamp("2011-01-03 10:00"), - Timestamp("2011-01-04 10:00"), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - # DatetimeBlockTZ - idx = pd.DatetimeIndex( - ["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz - ) - s = Series(idx) - assert s.dtype == f"datetime64[ns, {tz}]" - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna(pd.Timestamp("2011-01-02 10:00")) - expected = Series( - [ - Timestamp("2011-01-01 10:00", tz=tz), - Timestamp("2011-01-02 10:00"), - Timestamp("2011-01-03 10:00", tz=tz), - Timestamp("2011-01-02 10:00"), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz)) - idx = pd.DatetimeIndex( - [ - "2011-01-01 10:00", - "2011-01-02 10:00", - "2011-01-03 10:00", - "2011-01-02 10:00", - ], - tz=tz, - ) - expected = Series(idx) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime()) - idx = pd.DatetimeIndex( - [ - "2011-01-01 10:00", - "2011-01-02 10:00", - "2011-01-03 10:00", - "2011-01-02 10:00", - ], - tz=tz, - ) - expected = Series(idx) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna("AAA") - expected = Series( - [ - Timestamp("2011-01-01 10:00", tz=tz), - "AAA", - Timestamp("2011-01-03 10:00", tz=tz), - "AAA", - ], - dtype=object, - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna( - { - 1: pd.Timestamp("2011-01-02 10:00", tz=tz), - 3: pd.Timestamp("2011-01-04 10:00"), - } - ) - expected = Series( - [ - Timestamp("2011-01-01 10:00", tz=tz), - Timestamp("2011-01-02 10:00", tz=tz), - Timestamp("2011-01-03 10:00", tz=tz), - Timestamp("2011-01-04 10:00"), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna( - { - 1: pd.Timestamp("2011-01-02 10:00", tz=tz), - 3: pd.Timestamp("2011-01-04 10:00", tz=tz), - } - ) - expected = Series( - [ - Timestamp("2011-01-01 10:00", tz=tz), - Timestamp("2011-01-02 10:00", tz=tz), - Timestamp("2011-01-03 10:00", tz=tz), - Timestamp("2011-01-04 10:00", tz=tz), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - # filling with a naive/other zone, coerce to object - result = s.fillna(Timestamp("20130101")) - expected = Series( - [ - Timestamp("2011-01-01 10:00", tz=tz), - Timestamp("2013-01-01"), - Timestamp("2011-01-03 10:00", tz=tz), - Timestamp("2013-01-01"), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - result = s.fillna(Timestamp("20130101", tz="US/Pacific")) - expected = Series( - [ - 
Timestamp("2011-01-01 10:00", tz=tz), - Timestamp("2013-01-01", tz="US/Pacific"), - Timestamp("2011-01-03 10:00", tz=tz), - Timestamp("2013-01-01", tz="US/Pacific"), - ] - ) - tm.assert_series_equal(expected, result) - tm.assert_series_equal(pd.isna(s), null_loc) - - def test_fillna_dt64tz_with_method(self): - # with timezone - # GH 15855 - ser = Series([pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT]) - exp = Series( - [ - pd.Timestamp("2012-11-11 00:00:00+01:00"), - pd.Timestamp("2012-11-11 00:00:00+01:00"), - ] - ) - tm.assert_series_equal(ser.fillna(method="pad"), exp) - - ser = Series([pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")]) - exp = Series( - [ - pd.Timestamp("2012-11-11 00:00:00+01:00"), - pd.Timestamp("2012-11-11 00:00:00+01:00"), - ] - ) - tm.assert_series_equal(ser.fillna(method="bfill"), exp) - - def test_fillna_consistency(self): - # GH 16402 - # fillna with a tz aware to a tz-naive, should result in object - - s = Series([Timestamp("20130101"), pd.NaT]) - - result = s.fillna(Timestamp("20130101", tz="US/Eastern")) - expected = Series( - [Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")], - dtype="object", - ) - tm.assert_series_equal(result, expected) - - # where (we ignore the errors=) - result = s.where( - [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" - ) - tm.assert_series_equal(result, expected) - - result = s.where( - [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" - ) - tm.assert_series_equal(result, expected) - - # with a non-datetime - result = s.fillna("foo") - expected = Series([Timestamp("20130101"), "foo"]) - tm.assert_series_equal(result, expected) - - # assignment - s2 = s.copy() - s2[1] = "foo" - tm.assert_series_equal(s2, expected) - - def test_datetime64tz_fillna_round_issue(self): - # GH 14872 - - data = Series( - [pd.NaT, pd.NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)] - ) - - filled = data.fillna(method="bfill") - - expected = Series( - [ - datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), - datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), - datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), - ] - ) - - tm.assert_series_equal(filled, expected) - - def test_fillna_downcast(self): - # GH 15277 - # infer int64 from float64 - s = Series([1.0, np.nan]) - result = s.fillna(0, downcast="infer") - expected = Series([1, 0]) - tm.assert_series_equal(result, expected) - - # infer int64 from float64 when fillna value is a dict - s = Series([1.0, np.nan]) - result = s.fillna({1: 0}, downcast="infer") - expected = Series([1, 0]) - tm.assert_series_equal(result, expected) - - def test_fillna_int(self): - s = Series(np.random.randint(-100, 100, 50)) - return_value = s.fillna(method="ffill", inplace=True) - assert return_value is None - tm.assert_series_equal(s.fillna(method="ffill", inplace=False), s) - def test_categorical_nan_equality(self): cat = Series(Categorical(["a", "b", "c", np.nan])) exp = Series([True, True, True, False]) @@ -531,111 +95,6 @@ def test_isnull_for_inf_deprecated(self): tm.assert_series_equal(r, e) tm.assert_series_equal(dr, de) - def test_fillna(self, datetime_series): - ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5)) - - tm.assert_series_equal(ts, ts.fillna(method="ffill")) - - ts[2] = np.NaN - - exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index) - tm.assert_series_equal(ts.fillna(method="ffill"), exp) - - exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index) - 
tm.assert_series_equal(ts.fillna(method="backfill"), exp) - - exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index) - tm.assert_series_equal(ts.fillna(value=5), exp) - - msg = "Must specify a fill 'value' or 'method'" - with pytest.raises(ValueError, match=msg): - ts.fillna() - - msg = "Cannot specify both 'value' and 'method'" - with pytest.raises(ValueError, match=msg): - datetime_series.fillna(value=0, method="ffill") - - # GH 5703 - s1 = Series([np.nan]) - s2 = Series([1]) - result = s1.fillna(s2) - expected = Series([1.0]) - tm.assert_series_equal(result, expected) - result = s1.fillna({}) - tm.assert_series_equal(result, s1) - result = s1.fillna(Series((), dtype=object)) - tm.assert_series_equal(result, s1) - result = s2.fillna(s1) - tm.assert_series_equal(result, s2) - result = s1.fillna({0: 1}) - tm.assert_series_equal(result, expected) - result = s1.fillna({1: 1}) - tm.assert_series_equal(result, Series([np.nan])) - result = s1.fillna({0: 1, 1: 1}) - tm.assert_series_equal(result, expected) - result = s1.fillna(Series({0: 1, 1: 1})) - tm.assert_series_equal(result, expected) - result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5])) - tm.assert_series_equal(result, s1) - - s1 = Series([0, 1, 2], list("abc")) - s2 = Series([0, np.nan, 2], list("bac")) - result = s2.fillna(s1) - expected = Series([0, 0, 2.0], list("bac")) - tm.assert_series_equal(result, expected) - - # limit - s = Series(np.nan, index=[0, 1, 2]) - result = s.fillna(999, limit=1) - expected = Series([999, np.nan, np.nan], index=[0, 1, 2]) - tm.assert_series_equal(result, expected) - - result = s.fillna(999, limit=2) - expected = Series([999, 999, np.nan], index=[0, 1, 2]) - tm.assert_series_equal(result, expected) - - # GH 9043 - # make sure a string representation of int/float values can be filled - # correctly without raising errors or being converted - vals = ["0", "1.5", "-0.3"] - for val in vals: - s = Series([0, 1, np.nan, np.nan, 4], dtype="float64") - result = s.fillna(val) - expected = Series([0, 1, val, val, 4], dtype="object") - tm.assert_series_equal(result, expected) - - def test_fillna_bug(self): - x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"]) - filled = x.fillna(method="ffill") - expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], x.index) - tm.assert_series_equal(filled, expected) - - filled = x.fillna(method="bfill") - expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], x.index) - tm.assert_series_equal(filled, expected) - - def test_fillna_invalid_method(self, datetime_series): - try: - datetime_series.fillna(method="ffil") - except ValueError as inst: - assert "ffil" in str(inst) - - def test_ffill(self): - ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5)) - ts[2] = np.NaN - tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill")) - - def test_ffill_mixed_dtypes_without_missing_data(self): - # GH14956 - series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1]) - result = series.ffill() - tm.assert_series_equal(series, result) - - def test_bfill(self): - ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5)) - ts[2] = np.NaN - tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill")) - def test_timedelta64_nan(self): td = Series([timedelta(days=i) for i in range(10)]) @@ -773,20 +232,6 @@ def test_notna(self): expected = Series([True, True, False]) tm.assert_series_equal(ser.notna(), expected) - def test_pad_nan(self): - x = Series( - [np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float - ) - - return_value = 
x.fillna(method="pad", inplace=True) - assert return_value is None - - expected = Series( - [np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float - ) - tm.assert_series_equal(x[1:], expected[1:]) - assert np.isnan(x[0]), np.isnan(expected[0]) - def test_pad_require_monotonicity(self): rng = date_range("1/1/2000", "3/1/2000", freq="B") @@ -806,37 +251,3 @@ def test_dropna_preserve_name(self, datetime_series): return_value = ts.dropna(inplace=True) assert return_value is None assert ts.name == name - - def test_series_fillna_limit(self): - index = np.arange(10) - s = Series(np.random.randn(10), index=index) - - result = s[:2].reindex(index) - result = result.fillna(method="pad", limit=5) - - expected = s[:2].reindex(index).fillna(method="pad") - expected[-3:] = np.nan - tm.assert_series_equal(result, expected) - - result = s[-2:].reindex(index) - result = result.fillna(method="bfill", limit=5) - - expected = s[-2:].reindex(index).fillna(method="backfill") - expected[:3] = np.nan - tm.assert_series_equal(result, expected) - - def test_series_pad_backfill_limit(self): - index = np.arange(10) - s = Series(np.random.randn(10), index=index) - - result = s[:2].reindex(index, method="pad", limit=5) - - expected = s[:2].reindex(index).fillna(method="pad") - expected[-3:] = np.nan - tm.assert_series_equal(result, expected) - - result = s[-2:].reindex(index, method="backfill", limit=5) - - expected = s[-2:].reindex(index).fillna(method="backfill") - expected[:3] = np.nan - tm.assert_series_equal(result, expected)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
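A minimal sketch of the dict/Series fill semantics that the relocated ``test_fillna_dict_series`` pins down (not part of the PR itself, and assuming any recent pandas): a Series passed to ``DataFrame.fillna`` is treated like a dict keyed by column label.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [np.nan, 1.0], "b": [1.0, np.nan]})

# A Series fill value is applied column-wise, exactly like a dict
# keyed by column label, so these two calls are equivalent:
by_series = df.fillna(df.max())
by_dict = df.fillna(df.max().to_dict())
assert by_series.equals(by_dict)
```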
https://api.github.com/repos/pandas-dev/pandas/pulls/37281
2020-10-20T18:22:17Z
2020-10-21T12:55:59Z
2020-10-21T12:55:59Z
2020-10-21T15:20:53Z
TST: fix warnings on multiple subplots
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 82a62c4588b94..c868c8d4fba07 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -550,41 +550,85 @@ def _unpack_cycler(self, rcParams, field="color"): return [v[field] for v in rcParams["axes.prop_cycle"]] -def _check_plot_works(f, filterwarnings="always", **kwargs): +def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs): + """ + Create plot and ensure that plot return object is valid. + + Parameters + ---------- + f : func + Plotting function. + filterwarnings : str + Warnings filter. + See https://docs.python.org/3/library/warnings.html#warning-filter + default_axes : bool, optional + If False (default): + - If `ax` not in `kwargs`, then create subplot(211) and plot there + - Create new subplot(212) and plot there as well + - Mind special corner case for bootstrap_plot (see `_gen_two_subplots`) + If True: + - Simply run plotting function with kwargs provided + - All required axes instances will be created automatically + - It is recommended to use it when the plotting function + creates multiple axes itself. It helps avoid warnings like + 'UserWarning: To output multiple subplots, + the figure containing the passed axes is being cleared' + **kwargs + Keyword arguments passed to the plotting function. + + Returns + ------- + Plot object returned by the last plotting. + """ import matplotlib.pyplot as plt + if default_axes: + gen_plots = _gen_default_plot + else: + gen_plots = _gen_two_subplots + ret = None with warnings.catch_warnings(): warnings.simplefilter(filterwarnings) try: - try: - fig = kwargs["figure"] - except KeyError: - fig = plt.gcf() - + fig = kwargs.get("figure", plt.gcf()) plt.clf() - kwargs.get("ax", fig.add_subplot(211)) - ret = f(**kwargs) - - tm.assert_is_valid_plot_return_object(ret) - - if f is pd.plotting.bootstrap_plot: - assert "ax" not in kwargs - else: - kwargs["ax"] = fig.add_subplot(212) - - ret = f(**kwargs) - tm.assert_is_valid_plot_return_object(ret) + for ret in gen_plots(f, fig, **kwargs): + tm.assert_is_valid_plot_return_object(ret) with tm.ensure_clean(return_filelike=True) as path: plt.savefig(path) + + except Exception as err: + raise err finally: tm.close(fig) return ret +def _gen_default_plot(f, fig, **kwargs): + """ + Create plot in a default way. + """ + yield f(**kwargs) + + +def _gen_two_subplots(f, fig, **kwargs): + """ + Create plot on two subplots forcefully created. 
+ """ + kwargs.get("ax", fig.add_subplot(211)) + yield f(**kwargs) + + if f is pd.plotting.bootstrap_plot: + assert "ax" not in kwargs + else: + kwargs["ax"] = fig.add_subplot(212) + yield f(**kwargs) + + def curpath(): pth, _ = os.path.split(os.path.abspath(__file__)) return pth diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 49335230171c6..ab0024559333e 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -152,7 +152,8 @@ def test_hist_with_legend(self, by, expected_axes_num, expected_layout): s = Series(np.random.randn(30), index=index, name="a") s.index.name = "b" - axes = _check_plot_works(s.hist, legend=True, by=by) + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by) self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) self._check_legend_labels(axes, "a") @@ -332,7 +333,8 @@ def test_tight_layout(self): dtype=np.int64, ) ) - _check_plot_works(df.hist) + # Use default_axes=True when plotting method generate subplots itself + _check_plot_works(df.hist, default_axes=True) self.plt.tight_layout() tm.close() @@ -345,8 +347,10 @@ def test_hist_subplot_xrot(self): "animal": ["pig", "rabbit", "pig", "pig", "rabbit"], } ) + # Use default_axes=True when plotting method generate subplots itself axes = _check_plot_works( df.hist, + default_axes=True, filterwarnings="always", column="length", by="animal", @@ -374,9 +378,14 @@ def test_hist_column_order_unchanged(self, column, expected): index=["pig", "rabbit", "duck", "chicken", "horse"], ) - axes = _check_plot_works(df.hist, column=column, layout=(1, 3)) + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column=column, + layout=(1, 3), + ) result = [axes[0, i].get_title() for i in range(3)] - assert result == expected @pytest.mark.parametrize( @@ -407,7 +416,15 @@ def test_hist_with_legend(self, by, column): index = Index(15 * ["1"] + 15 * ["2"], name="c") df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"]) - axes = _check_plot_works(df.hist, legend=True, by=by, column=column) + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + legend=True, + by=by, + column=column, + ) + self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) if by is None and column is None: axes = axes[0]
- [ ] xref #37178 partially - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This PR suggests a (partial) fix for such warnings, which appear when running the test suite on ``pandas/tests/plotting``. ``` UserWarning: To output multiple subplots, the figure containing the passed axes is being cleared ``` The problem is the following. The function ``_check_plot_works`` creates subplot(211) if axes are not provided. This is fine when the plot to be created is a single-axes plot. However, if the plot itself is to be drawn on multiple axes (for instance, df.hist plots each column on a dedicated subplot by default), then the warnings above are emitted. Two ways to handle it: - Find the cases where the plot is a multi-axes plot and catch the warnings there. This works, but in some parametrized tests the plots are either single-axes or multi-axes, which makes it hard to handle without splitting the tests. - Change the function ``_check_plot_works`` so that it does not create subplot(211). I created ``_check_single_plot_works``, which does not create subplots, and used it in the problematic cases. The name is arguably not the best (I would prefer ``_check_plot_works``), so I encourage those concerned to discuss it here. Please see the comments in ``_check_plot_works`` (I will remove them once we reach a conclusion on the topic).
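For context, a minimal reproduction of the warning, sketched under the assumption of a non-interactive matplotlib backend: passing a pre-created axes to a plot that needs one subplot per column makes pandas clear the figure and warn, which is exactly what the second plotting call in the old ``_check_plot_works`` did.

```python
import warnings

import matplotlib

matplotlib.use("Agg")  # headless backend for the sketch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"])

fig = plt.figure()
ax = fig.add_subplot(212)  # mimics the second subplot the old helper forced
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    df.hist(ax=ax)  # hist wants one axes per column, so the figure is cleared
assert any("is being cleared" in str(w.message) for w in caught)
```

With ``default_axes=True`` the helper skips the forced subplots entirely and lets the plotting function allocate its own axes, so no such warning is emitted.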
https://api.github.com/repos/pandas-dev/pandas/pulls/37274
2020-10-20T12:16:02Z
2020-11-09T18:55:08Z
2020-11-09T18:55:08Z
2020-11-10T04:45:40Z
REF: collect tests by method
diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index aa44a2427dc8f..da369658078a0 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -420,3 +420,24 @@ def test_drop_preserve_names(self): result = df.drop([(0, 2)]) assert result.index.names == ("one", "two") + + @pytest.mark.parametrize( + "operation", ["__iadd__", "__isub__", "__imul__", "__ipow__"] + ) + @pytest.mark.parametrize("inplace", [False, True]) + def test_inplace_drop_and_operation(self, operation, inplace): + # GH#30484 + df = pd.DataFrame({"x": range(5)}) + expected = df.copy() + df["y"] = range(5) + y = df["y"] + + with tm.assert_produces_warning(None): + if inplace: + df.drop("y", axis=1, inplace=inplace) + else: + df = df.drop("y", axis=1, inplace=inplace) + + # Perform operation and check result + getattr(y, operation)(1) + tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index b68e20bee63fc..12945533b17ae 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -488,24 +488,3 @@ def test_reindex_multi_categorical_time(self): result = df2.reindex(midx) expected = pd.DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "operation", ["__iadd__", "__isub__", "__imul__", "__ipow__"] - ) - @pytest.mark.parametrize("inplace", [False, True]) - def test_inplace_drop_and_operation(self, operation, inplace): - # GH 30484 - df = pd.DataFrame({"x": range(5)}) - expected = df.copy() - df["y"] = range(5) - y = df["y"] - - with tm.assert_produces_warning(None): - if inplace: - df.drop("y", axis=1, inplace=inplace) - else: - df = df.drop("y", axis=1, inplace=inplace) - - # Perform operation and check result - getattr(y, operation)(1) - tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8ec11d14cd606..c6708a7b7f6c9 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2677,6 +2677,13 @@ def test_with_mismatched_index_length_raises(self): with pytest.raises(ValueError, match="Shape of passed values"): DataFrame(dti, index=range(4)) + def test_frame_ctor_datetime64_column(self): + rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") + dates = np.asarray(rng) + + df = DataFrame({"A": np.random.randn(len(rng)), "B": dates}) + assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]")) + class TestDataFrameConstructorWithDatetimeTZ: def test_from_dict(self): diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_logical_ops.py similarity index 100% rename from pandas/tests/frame/test_operators.py rename to pandas/tests/frame/test_logical_ops.py diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 63361789b8e50..e4e22953397ca 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -1,18 +1,11 @@ import numpy as np import pandas as pd -from pandas import DataFrame, date_range, to_datetime +from pandas import DataFrame, to_datetime import pandas._testing as tm class TestDataFrameTimeSeriesMethods: - def test_frame_ctor_datetime64_column(self): - rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") - dates = np.asarray(rng) - - df = DataFrame({"A": 
np.random.randn(len(rng)), "B": dates}) - assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]")) - def test_frame_append_datetime64_col_other_units(self): n = 100 diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_logical_ops.py similarity index 100% rename from pandas/tests/series/test_operators.py rename to pandas/tests/series/test_logical_ops.py
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37271
2020-10-20T03:39:12Z
2020-10-20T16:51:55Z
2020-10-20T16:51:55Z
2020-10-20T17:31:13Z
REGR: Make comparisons consistent for PeriodDtype
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index 39de351ad4664..c3868fd147974 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`Series.astype` converting ``None`` to ``"nan"`` when casting to string (:issue:`36904`) - Fixed regression in :class:`RollingGroupby` causing a segmentation fault with Index of dtype object (:issue:`36727`) - Fixed regression in :meth:`DataFrame.resample(...).apply(...)` raised ``AttributeError`` when input was a :class:`DataFrame` and only a :class:`Series` was evaluated (:issue:`36951`) +- Fixed regression in :class:`PeriodDtype` comparing both equal and unequal to its string representation (:issue:`37265`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 28b3a2414679b..01b34187997cb 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -907,6 +907,9 @@ def __eq__(self, other: Any) -> bool: return isinstance(other, PeriodDtype) and self.freq == other.freq + def __ne__(self, other: Any) -> bool: + return not self.__eq__(other) + def __setstate__(self, state): # for pickle compat. __getstate__ is defined in the # PandasExtensionDtype superclass and uses the public properties to diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index a58dc5e5ec74a..f6cd500f911b2 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -991,3 +991,10 @@ def test_is_dtype_no_warning(check): with tm.assert_produces_warning(None): check(data["A"]) + + +def test_period_dtype_compare_to_string(): + # https://github.com/pandas-dev/pandas/issues/37265 + dtype = PeriodDtype(freq="M") + assert (dtype == "period[M]") is True + assert (dtype != "period[M]") is False
- [x] closes #37265 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
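For illustration, a minimal sketch of the inconsistency being fixed (mirrors the test added in this PR):

```python
import pandas as pd

# GH#37265: without an explicit __ne__, the dtype could evaluate both
# == and != as True against its string alias; the added __ne__ keeps
# the two consistent.
dtype = pd.PeriodDtype(freq="M")
assert (dtype == "period[M]") is True
assert (dtype != "period[M]") is False  # could be True before this patch
```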
https://api.github.com/repos/pandas-dev/pandas/pulls/37270
2020-10-20T01:59:04Z
2020-10-20T16:50:02Z
2020-10-20T16:50:01Z
2020-10-21T11:38:18Z
BUG: Don't raise TypeError when converting NA from string to numeric
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 83a9edfb239e2..c797114b17cc0 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -409,7 +409,7 @@ Conversion Strings ^^^^^^^ - Bug in :meth:`Series.to_string`, :meth:`DataFrame.to_string`, and :meth:`DataFrame.to_latex` adding a leading space when ``index=False`` (:issue:`24980`) -- +- Bug in :func:`to_numeric` raising a ``TypeError`` when attempting to convert a string dtype :class:`Series` containing only numeric strings and ``NA`` (:issue:`37262`) - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f4caafb3a9fe7..001fbae120ae8 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2019,7 +2019,7 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, elif util.is_bool_object(val): floats[i] = uints[i] = ints[i] = bools[i] = val seen.bool_ = True - elif val is None: + elif val is None or val is C_NA: seen.saw_null() floats[i] = complexes[i] = NaN elif hasattr(val, '__len__') and len(val) == 0: diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index 450076f2824ad..b22f249de2826 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -707,3 +707,21 @@ def test_precision_float_conversion(strrep): result = to_numeric(strrep) assert result == float(strrep) + + +@pytest.mark.parametrize( + "values, expected", + [ + (["1", "2", None], Series([1, 2, np.nan])), + (["1", "2", "3"], Series([1, 2, 3])), + (["1", "2", 3], Series([1, 2, 3])), + (["1", "2", 3.5], Series([1, 2, 3.5])), + (["1", None, 3.5], Series([1, np.nan, 3.5])), + (["1", "2", "3.5"], Series([1, 2, 3.5])), + ], +) +def test_to_numeric_from_nullable_string(values, expected): + # https://github.com/pandas-dev/pandas/issues/37262 + s = Series(values, dtype="string") + result = to_numeric(s) + tm.assert_series_equal(result, expected)
- [x] closes #37262 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
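A short sketch of the behavior change, mirroring the tests added here:

```python
import pandas as pd

# Before this change, maybe_convert_numeric did not recognize pd.NA,
# so converting a nullable-string Series of numeric strings raised
# TypeError. Now NA is treated like None and becomes NaN:
s = pd.Series(["1", "2", pd.NA], dtype="string")
print(pd.to_numeric(s))
# 0    1.0
# 1    2.0
# 2    NaN
# dtype: float64
```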
https://api.github.com/repos/pandas-dev/pandas/pulls/37268
2020-10-20T00:26:02Z
2020-10-20T23:10:11Z
2020-10-20T23:10:10Z
2020-10-20T23:12:42Z
REF: nargsort incorrectly calling _values_for_argsort
diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst index 99d642c379921..dd859dabc9c64 100644 --- a/doc/source/whatsnew/v0.24.1.rst +++ b/doc/source/whatsnew/v0.24.1.rst @@ -1,7 +1,7 @@ .. _whatsnew_0241: What's new in 0.24.1 (February 3, 2019) --------------------------------------- +--------------------------------------- .. warning:: diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index f8f919664ed25..36684d465373c 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -1,7 +1,7 @@ .. _whatsnew_0242: What's new in 0.24.2 (March 12, 2019) ------------------------------------- +------------------------------------- .. warning:: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index debfb50caeeaa..f8ff5ac18bbd9 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -507,7 +507,12 @@ def _values_for_argsort(self) -> np.ndarray: return np.array(self) def argsort( - self, ascending: bool = True, kind: str = "quicksort", *args, **kwargs + self, + ascending: bool = True, + kind: str = "quicksort", + na_position: str = "last", + *args, + **kwargs, ) -> np.ndarray: """ Return the indices that would sort this array. @@ -538,7 +543,14 @@ def argsort( # 2. argsort : total control over sorting. ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs) - result = nargsort(self, kind=kind, ascending=ascending, na_position="last") + values = self._values_for_argsort() + result = nargsort( + values, + kind=kind, + ascending=ascending, + na_position=na_position, + mask=np.asarray(self.isna()), + ) return result def argmin(self): diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index e02b565ed5d7b..1132234ae7f8d 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -327,6 +327,7 @@ def nargsort( ascending: bool = True, na_position: str = "last", key: Optional[Callable] = None, + mask: Optional[np.ndarray] = None, ): """ Intended to be a drop-in replacement for np.argsort which handles NaNs. @@ -341,19 +342,27 @@ def nargsort( ascending : bool, default True na_position : {'first', 'last'}, default 'last' key : Optional[Callable], default None + mask : Optional[np.ndarray], default None + Passed when called by ExtensionArray.argsort. """ if key is not None: items = ensure_key_mapped(items, key) return nargsort( - items, kind=kind, ascending=ascending, na_position=na_position, key=None + items, + kind=kind, + ascending=ascending, + na_position=na_position, + key=None, + mask=mask, ) items = extract_array(items) - mask = np.asarray(isna(items)) + if mask is None: + mask = np.asarray(isna(items)) if is_extension_array_dtype(items): - items = items._values_for_argsort() + return items.argsort(ascending=ascending, kind=kind, na_position=na_position) else: items = np.asanyarray(items)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry cc @jorisvandenbossche
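For context, a hedged sketch of what the refactor means in practice (the example values are illustrative, not taken from the PR):

```python
import pandas as pd

# nargsort now dispatches ExtensionArrays back to ExtensionArray.argsort,
# which computes its own NA mask, instead of nargsort calling
# _values_for_argsort directly; argsort also gains an na_position keyword.
arr = pd.array([3, 1, None, 2], dtype="Int64")
print(arr.argsort(na_position="last"))  # e.g. [1 3 0 2], NA sorted last
```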
https://api.github.com/repos/pandas-dev/pandas/pulls/37266
2020-10-19T22:01:31Z
2020-10-20T00:25:52Z
2020-10-20T00:25:52Z
2020-10-20T00:38:13Z
ENH: implement matching warning message
diff --git a/pandas/_testing.py b/pandas/_testing.py index cf6272edc4c05..a4fdb390abf42 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -6,6 +6,7 @@ import gzip import operator import os +import re from shutil import rmtree import string import tempfile @@ -2546,10 +2547,11 @@ def wrapper(*args, **kwargs): @contextmanager def assert_produces_warning( - expected_warning=Warning, + expected_warning: Optional[Union[Type[Warning], bool]] = Warning, filter_level="always", - check_stacklevel=True, - raise_on_extra_warnings=True, + check_stacklevel: bool = True, + raise_on_extra_warnings: bool = True, + match: Optional[str] = None, ): """ Context manager for running code expected to either raise a specific @@ -2584,6 +2586,8 @@ class for all warnings. To check that no warning is returned, raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. + match : str, optional + Match warning message. Examples -------- @@ -2610,28 +2614,28 @@ class for all warnings. To check that no warning is returned, with warnings.catch_warnings(record=True) as w: saw_warning = False + matched_message = False + warnings.simplefilter(filter_level) yield w extra_warnings = [] for actual_warning in w: - if expected_warning and issubclass( - actual_warning.category, expected_warning - ): + if not expected_warning: + continue + + expected_warning = cast(Type[Warning], expected_warning) + if issubclass(actual_warning.category, expected_warning): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): - from inspect import getframeinfo, stack + _assert_raised_with_correct_stacklevel(actual_warning) + + if match is not None and re.search(match, str(actual_warning.message)): + matched_message = True - caller = getframeinfo(stack()[2][0]) - msg = ( - "Warning not set with correct stacklevel. " - f"File where warning is raised: {actual_warning.filename} != " - f"{caller.filename}. Warning message: {actual_warning.message}" - ) - assert actual_warning.filename == caller.filename, msg else: extra_warnings.append( ( @@ -2641,18 +2645,41 @@ class for all warnings. To check that no warning is returned, actual_warning.lineno, ) ) + if expected_warning: - msg = ( - f"Did not see expected warning of class " - f"{repr(expected_warning.__name__)}" - ) - assert saw_warning, msg + expected_warning = cast(Type[Warning], expected_warning) + if not saw_warning: + raise AssertionError( + f"Did not see expected warning of class " + f"{repr(expected_warning.__name__)}" + ) + + if match and not matched_message: + raise AssertionError( + f"Did not see warning {repr(expected_warning.__name__)} " + f"matching {match}" + ) + if raise_on_extra_warnings and extra_warnings: raise AssertionError( f"Caused unexpected warning(s): {repr(extra_warnings)}" ) +def _assert_raised_with_correct_stacklevel( + actual_warning: warnings.WarningMessage, +) -> None: + from inspect import getframeinfo, stack + + caller = getframeinfo(stack()[3][0]) + msg = ( + "Warning not set with correct stacklevel. " + f"File where warning is raised: {actual_warning.filename} != " + f"{caller.filename}. Warning message: {actual_warning.message}" + ) + assert actual_warning.filename == caller.filename, msg + + class RNGContext: """ Context manager to set the numpy random number generator speed. 
Returns diff --git a/pandas/tests/util/test_assert_produces_warning.py b/pandas/tests/util/test_assert_produces_warning.py index 87765c909938d..5f87824713175 100644 --- a/pandas/tests/util/test_assert_produces_warning.py +++ b/pandas/tests/util/test_assert_produces_warning.py @@ -1,10 +1,58 @@ +""" +Test module for testing ``pandas._testing.assert_produces_warning``. +""" import warnings import pytest +from pandas.errors import DtypeWarning, PerformanceWarning + import pandas._testing as tm +@pytest.fixture( + params=[ + RuntimeWarning, + ResourceWarning, + UserWarning, + FutureWarning, + DeprecationWarning, + PerformanceWarning, + DtypeWarning, + ], +) +def category(request): + """ + Return a unique warning. + + Useful for testing behavior of tm.assert_produces_warning with various categories. + """ + return request.param + + +@pytest.fixture( + params=[ + (RuntimeWarning, UserWarning), + (UserWarning, FutureWarning), + (FutureWarning, RuntimeWarning), + (DeprecationWarning, PerformanceWarning), + (PerformanceWarning, FutureWarning), + (DtypeWarning, DeprecationWarning), + (ResourceWarning, DeprecationWarning), + (FutureWarning, DeprecationWarning), + ], + ids=lambda x: type(x).__name__, +) +def pair_different_warnings(request): + """ + Return a pair of different warnings. + + Useful for testing how several different warnings are handled + in tm.assert_produces_warning. + """ + return request.param + + def f(): warnings.warn("f1", FutureWarning) warnings.warn("f2", RuntimeWarning) @@ -20,3 +68,87 @@ def test_assert_produces_warning_honors_filter(): with tm.assert_produces_warning(RuntimeWarning, raise_on_extra_warnings=False): f() + + +@pytest.mark.parametrize( + "message, match", + [ + ("", None), + ("", ""), + ("Warning message", r".*"), + ("Warning message", "War"), + ("Warning message", r"[Ww]arning"), + ("Warning message", "age"), + ("Warning message", r"age$"), + ("Message 12-234 with numbers", r"\d{2}-\d{3}"), + ("Message 12-234 with numbers", r"^Mes.*\d{2}-\d{3}"), + ("Message 12-234 with numbers", r"\d{2}-\d{3}\s\S+"), + ("Message, which we do not match", None), + ], +) +def test_catch_warning_category_and_match(category, message, match): + with tm.assert_produces_warning(category, match=match): + warnings.warn(message, category) + + +@pytest.mark.parametrize( + "message, match", + [ + ("Warning message", "Not this message"), + ("Warning message", "warning"), + ("Warning message", r"\d+"), + ], +) +def test_fail_to_match(category, message, match): + msg = f"Did not see warning {repr(category.__name__)} matching" + with pytest.raises(AssertionError, match=msg): + with tm.assert_produces_warning(category, match=match): + warnings.warn(message, category) + + +def test_fail_to_catch_actual_warning(pair_different_warnings): + expected_category, actual_category = pair_different_warnings + match = "Did not see expected warning of class" + with pytest.raises(AssertionError, match=match): + with tm.assert_produces_warning(expected_category): + warnings.warn("warning message", actual_category) + + +def test_ignore_extra_warning(pair_different_warnings): + expected_category, extra_category = pair_different_warnings + with tm.assert_produces_warning(expected_category, raise_on_extra_warnings=False): + warnings.warn("Expected warning", expected_category) + warnings.warn("Unexpected warning OK", extra_category) + + +def test_raise_on_extra_warning(pair_different_warnings): + expected_category, extra_category = pair_different_warnings + match = r"Caused unexpected warning\(s\)" + with
pytest.raises(AssertionError, match=match): + with tm.assert_produces_warning(expected_category): + warnings.warn("Expected warning", expected_category) + warnings.warn("Unexpected warning NOT OK", extra_category) + + +def test_same_category_different_messages_first_match(): + category = UserWarning + with tm.assert_produces_warning(category, match=r"^Match this"): + warnings.warn("Match this", category) + warnings.warn("Do not match that", category) + warnings.warn("Do not match that either", category) + + +def test_same_category_different_messages_last_match(): + category = DeprecationWarning + with tm.assert_produces_warning(category, match=r"^Match this"): + warnings.warn("Do not match that", category) + warnings.warn("Do not match that either", category) + warnings.warn("Match this", category) + + +def test_right_category_wrong_match_raises(pair_different_warnings): + target_category, other_category = pair_different_warnings + with pytest.raises(AssertionError, match="Did not see warning.*matching"): + with tm.assert_produces_warning(target_category, match=r"^Match this"): + warnings.warn("Do not match it", target_category) + warnings.warn("Match this", other_category)
- [ ] closes #37261 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Implements warning-message matching via a new kwarg ``match`` in ``assert_produces_warning``.
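A usage sketch of the new keyword (the message text is illustrative):

```python
import warnings

import pandas._testing as tm

# At least one caught warning of the expected category must have a
# message matching the given regex, otherwise an AssertionError
# ("Did not see warning ... matching ...") is raised.
with tm.assert_produces_warning(FutureWarning, match=r"^deprecated"):
    warnings.warn("deprecated since some release", FutureWarning)
```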
https://api.github.com/repos/pandas-dev/pandas/pulls/37263
2020-10-19T18:54:36Z
2020-10-20T16:51:01Z
2020-10-20T16:51:01Z
2020-11-06T15:38:58Z
BUG: with integer column labels, .info() throws KeyError
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index ad59711b90f6e..7dd660374a6fc 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -30,6 +30,7 @@ Bug fixes - Bug causing ``groupby(...).sum()`` and similar to not preserve metadata (:issue:`29442`) - Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` raising a ``ValueError`` when the target was read-only (:issue:`37174`) - Bug in :meth:`GroupBy.fillna` that introduced a performance regression after 1.0.5 (:issue:`36757`) +- Bug in :meth:`DataFrame.info` was raising a ``KeyError`` when the DataFrame has integer column names (:issue:`37245`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index 970bb8c535534..a57fda7472878 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -340,13 +340,13 @@ def _verbose_repr( lines.append(top_separator) for i, col in enumerate(ids): - dtype = dtypes[i] + dtype = dtypes.iloc[i] col = pprint_thing(col) line_no = _put_str(f" {i}", space_num) count = "" if show_counts: - count = counts[i] + count = counts.iloc[i] lines.append( line_no diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py index d98530b5435e7..fd44bd431d50f 100644 --- a/pandas/tests/io/formats/test_info.py +++ b/pandas/tests/io/formats/test_info.py @@ -459,3 +459,25 @@ def test_info_categorical(): buf = StringIO() df.info(buf=buf) + + +def test_info_int_columns(): + # GH#37245 + df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"]) + buf = StringIO() + df.info(null_counts=True, buf=buf) + result = buf.getvalue() + expected = textwrap.dedent( + """\ + <class 'pandas.core.frame.DataFrame'> + Index: 2 entries, A to B + Data columns (total 2 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 1 2 non-null int64 + 1 2 2 non-null int64 + dtypes: int64(2) + memory usage: 48.0+ bytes + """ + ) + assert result == expected
- [ ] closes #37245 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
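A reproduction sketch, mirroring the test added in this PR:

```python
import pandas as pd

# GH#37245: with integer column labels, the positional lookups dtypes[i]
# and counts[i] in _verbose_repr resolved as label-based indexing and
# raised KeyError; the fix switches them to .iloc.
df = pd.DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"])
df.info(null_counts=True)  # raised KeyError before this fix
```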
https://api.github.com/repos/pandas-dev/pandas/pulls/37256
2020-10-19T16:54:17Z
2020-10-20T00:51:57Z
2020-10-20T00:51:56Z
2020-10-20T19:47:54Z
TST: use equivalent fixtures in test_multilevel
diff --git a/pandas/conftest.py b/pandas/conftest.py index ebb24c184d9a4..5a4bc397ab792 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -361,6 +361,19 @@ def multiindex_year_month_day_dataframe_random_data(): return ymd +@pytest.fixture +def multiindex_dataframe_random_data(): + """DataFrame with 2 level MultiIndex with random data""" + index = MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + return DataFrame( + np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp") + ) + + def _create_multiindex(): """ MultiIndex used to test the general functionality of this object diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py deleted file mode 100644 index c69d6f86a6ce6..0000000000000 --- a/pandas/tests/indexing/multiindex/conftest.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np -import pytest - -from pandas import DataFrame, Index, MultiIndex - - -@pytest.fixture -def multiindex_dataframe_random_data(): - """DataFrame with 2 level MultiIndex with random data""" - index = MultiIndex( - levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], - codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=["first", "second"], - ) - return DataFrame( - np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp") - ) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 274860b3fdb5c..9c29d3a062dfa 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -31,21 +31,6 @@ class Base: def setup_method(self, method): - index = MultiIndex( - levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], - codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=["first", "second"], - ) - self.frame = DataFrame( - np.random.randn(10, 3), - index=index, - columns=Index(["A", "B", "C"], name="exp"), - ) - - self.single_level = MultiIndex( - levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"] - ) - # create test series object arrays = [ ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], @@ -57,27 +42,18 @@ def setup_method(self, method): s[3] = np.NaN self.series = s - self.tdf = tm.makeTimeDataFrame(100) - self.ymd = self.tdf.groupby( - [lambda x: x.year, lambda x: x.month, lambda x: x.day] - ).sum() - - # use Int64Index, to make sure things work - self.ymd.index = self.ymd.index.set_levels( - [lev.astype("i8") for lev in self.ymd.index.levels] - ) - self.ymd.index.set_names(["year", "month", "day"], inplace=True) - class TestMultiLevel(Base): - def test_append(self): - a, b = self.frame[:5], self.frame[5:] + def test_append(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + a, b = frame[:5], frame[5:] result = a.append(b) - tm.assert_frame_equal(result, self.frame) + tm.assert_frame_equal(result, frame) result = a["A"].append(b["A"]) - tm.assert_series_equal(result, self.frame["A"]) + tm.assert_series_equal(result, frame["A"]) def test_dataframe_constructor(self): multi = DataFrame( @@ -104,40 +80,44 @@ def test_series_constructor(self): multi = Series(range(4), index=[["a", "a", "b", "b"], ["x", "y", "x", "y"]]) assert isinstance(multi.index, MultiIndex) - def test_reindex_level(self): + def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data): # axis=0 - 
month_sums = self.ymd.sum(level="month") - result = month_sums.reindex(self.ymd.index, level=1) - expected = self.ymd.groupby(level="month").transform(np.sum) + ymd = multiindex_year_month_day_dataframe_random_data + + month_sums = ymd.sum(level="month") + result = month_sums.reindex(ymd.index, level=1) + expected = ymd.groupby(level="month").transform(np.sum) tm.assert_frame_equal(result, expected) # Series - result = month_sums["A"].reindex(self.ymd.index, level=1) - expected = self.ymd["A"].groupby(level="month").transform(np.sum) + result = month_sums["A"].reindex(ymd.index, level=1) + expected = ymd["A"].groupby(level="month").transform(np.sum) tm.assert_series_equal(result, expected, check_names=False) # axis=1 - month_sums = self.ymd.T.sum(axis=1, level="month") - result = month_sums.reindex(columns=self.ymd.index, level=1) - expected = self.ymd.groupby(level="month").transform(np.sum).T + month_sums = ymd.T.sum(axis=1, level="month") + result = month_sums.reindex(columns=ymd.index, level=1) + expected = ymd.groupby(level="month").transform(np.sum).T tm.assert_frame_equal(result, expected) - def test_binops_level(self): + def test_binops_level(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + def _check_op(opname): op = getattr(DataFrame, opname) - month_sums = self.ymd.sum(level="month") - result = op(self.ymd, month_sums, level="month") + month_sums = ymd.sum(level="month") + result = op(ymd, month_sums, level="month") - broadcasted = self.ymd.groupby(level="month").transform(np.sum) - expected = op(self.ymd, broadcasted) + broadcasted = ymd.groupby(level="month").transform(np.sum) + expected = op(ymd, broadcasted) tm.assert_frame_equal(result, expected) # Series op = getattr(Series, opname) - result = op(self.ymd["A"], month_sums["A"], level="month") - broadcasted = self.ymd["A"].groupby(level="month").transform(np.sum) - expected = op(self.ymd["A"], broadcasted) + result = op(ymd["A"], month_sums["A"], level="month") + broadcasted = ymd["A"].groupby(level="month").transform(np.sum) + expected = op(ymd["A"], broadcasted) expected.name = "A" tm.assert_series_equal(result, expected) @@ -146,47 +126,67 @@ def _check_op(opname): _check_op("mul") _check_op("div") - def test_pickle(self): + def test_pickle( + self, + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, + ): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + def _test_roundtrip(frame): unpickled = tm.round_trip_pickle(frame) tm.assert_frame_equal(frame, unpickled) - _test_roundtrip(self.frame) - _test_roundtrip(self.frame.T) - _test_roundtrip(self.ymd) - _test_roundtrip(self.ymd.T) + _test_roundtrip(frame) + _test_roundtrip(frame.T) + _test_roundtrip(ymd) + _test_roundtrip(ymd.T) - def test_reindex(self): - expected = self.frame.iloc[[0, 3]] - reindexed = self.frame.loc[[("foo", "one"), ("bar", "one")]] + def test_reindex(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + expected = frame.iloc[[0, 3]] + reindexed = frame.loc[[("foo", "one"), ("bar", "one")]] tm.assert_frame_equal(reindexed, expected) - def test_reindex_preserve_levels(self): - new_index = self.ymd.index[::10] - chunk = self.ymd.reindex(new_index) + def test_reindex_preserve_levels( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + new_index = ymd.index[::10] + chunk = ymd.reindex(new_index) assert 
chunk.index is new_index - chunk = self.ymd.loc[new_index] + chunk = ymd.loc[new_index] assert chunk.index is new_index - ymdT = self.ymd.T + ymdT = ymd.T chunk = ymdT.reindex(columns=new_index) assert chunk.columns is new_index chunk = ymdT.loc[:, new_index] assert chunk.columns is new_index - def test_repr_to_string(self): - repr(self.frame) - repr(self.ymd) - repr(self.frame.T) - repr(self.ymd.T) + def test_repr_to_string( + self, + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, + ): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + + repr(frame) + repr(ymd) + repr(frame.T) + repr(ymd.T) buf = StringIO() - self.frame.to_string(buf=buf) - self.ymd.to_string(buf=buf) - self.frame.T.to_string(buf=buf) - self.ymd.T.to_string(buf=buf) + frame.to_string(buf=buf) + ymd.to_string(buf=buf) + frame.T.to_string(buf=buf) + ymd.T.to_string(buf=buf) def test_repr_name_coincide(self): index = MultiIndex.from_tuples( @@ -206,10 +206,14 @@ def test_delevel_infer_dtype(self): assert is_integer_dtype(deleveled["prm1"]) assert is_float_dtype(deleveled["prm2"]) - def test_reset_index_with_drop(self): - deleveled = self.ymd.reset_index(drop=True) - assert len(deleveled.columns) == len(self.ymd.columns) - assert deleveled.index.name == self.ymd.index.name + def test_reset_index_with_drop( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + deleveled = ymd.reset_index(drop=True) + assert len(deleveled.columns) == len(ymd.columns) + assert deleveled.index.name == ymd.index.name deleveled = self.series.reset_index() assert isinstance(deleveled, DataFrame) @@ -220,7 +224,14 @@ def test_reset_index_with_drop(self): assert isinstance(deleveled, Series) assert deleveled.index.name == self.series.index.name - def test_count_level(self): + def test_count_level( + self, + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, + ): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + def _check_counts(frame, axis=0): index = frame._get_axis(axis) for i in range(index.nlevels): @@ -229,23 +240,23 @@ def _check_counts(frame, axis=0): expected = expected.reindex_like(result).astype("i8") tm.assert_frame_equal(result, expected) - self.frame.iloc[1, [1, 2]] = np.nan - self.frame.iloc[7, [0, 1]] = np.nan - self.ymd.iloc[1, [1, 2]] = np.nan - self.ymd.iloc[7, [0, 1]] = np.nan + frame.iloc[1, [1, 2]] = np.nan + frame.iloc[7, [0, 1]] = np.nan + ymd.iloc[1, [1, 2]] = np.nan + ymd.iloc[7, [0, 1]] = np.nan - _check_counts(self.frame) - _check_counts(self.ymd) - _check_counts(self.frame.T, axis=1) - _check_counts(self.ymd.T, axis=1) + _check_counts(frame) + _check_counts(ymd) + _check_counts(frame.T, axis=1) + _check_counts(ymd.T, axis=1) # can't call with level on regular DataFrame df = tm.makeTimeDataFrame() with pytest.raises(TypeError, match="hierarchical"): df.count(level=0) - self.frame["D"] = "foo" - result = self.frame.count(level=0, numeric_only=True) + frame["D"] = "foo" + result = frame.count(level=0, numeric_only=True) tm.assert_index_equal(result.columns, Index(list("ABC"), name="exp")) def test_count_index_with_nan(self): @@ -296,13 +307,15 @@ def test_count_level_series(self): result.astype("f8"), expected.reindex(result.index).fillna(0) ) - def test_count_level_corner(self): - s = self.frame["A"][:0] + def test_count_level_corner(self, multiindex_dataframe_random_data): + frame = 
multiindex_dataframe_random_data + + s = frame["A"][:0] result = s.count(level=0) expected = Series(0, index=s.index.levels[0], name="A") tm.assert_series_equal(result, expected) - df = self.frame[:0] + df = frame[:0] result = df.count(level=0) expected = ( DataFrame(index=s.index.levels[0].set_names(["first"]), columns=df.columns) @@ -311,22 +324,26 @@ def test_count_level_corner(self): ) tm.assert_frame_equal(result, expected) - def test_get_level_number_out_of_bounds(self): + def test_get_level_number_out_of_bounds(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + with pytest.raises(IndexError, match="Too many levels"): - self.frame.index._get_level_number(2) + frame.index._get_level_number(2) with pytest.raises(IndexError, match="not a valid level number"): - self.frame.index._get_level_number(-3) + frame.index._get_level_number(-3) - def test_unstack(self): + def test_unstack(self, multiindex_year_month_day_dataframe_random_data): # just check that it works for now - unstacked = self.ymd.unstack() + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack() unstacked.unstack() # test that ints work - self.ymd.astype(int).unstack() + ymd.astype(int).unstack() # test that int32 work - self.ymd.astype(np.int32).unstack() + ymd.astype(np.int32).unstack() @pytest.mark.parametrize( "result_rows,result_columns,index_product,expected_row", @@ -382,58 +399,60 @@ def test_unstack_multiple_no_empty_columns(self): expected = unstacked.dropna(axis=1, how="all") tm.assert_frame_equal(unstacked, expected) - def test_stack(self): + def test_stack(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + # regular roundtrip - unstacked = self.ymd.unstack() + unstacked = ymd.unstack() restacked = unstacked.stack() - tm.assert_frame_equal(restacked, self.ymd) + tm.assert_frame_equal(restacked, ymd) - unlexsorted = self.ymd.sort_index(level=2) + unlexsorted = ymd.sort_index(level=2) unstacked = unlexsorted.unstack(2) restacked = unstacked.stack() - tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd) + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) unlexsorted = unlexsorted[::-1] unstacked = unlexsorted.unstack(1) restacked = unstacked.stack().swaplevel(1, 2) - tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd) + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) unlexsorted = unlexsorted.swaplevel(0, 1) unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1) restacked = unstacked.stack(0).swaplevel(1, 2) - tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd) + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) # columns unsorted - unstacked = self.ymd.unstack() + unstacked = ymd.unstack() unstacked = unstacked.sort_index(axis=1, ascending=False) restacked = unstacked.stack() - tm.assert_frame_equal(restacked, self.ymd) + tm.assert_frame_equal(restacked, ymd) # more than 2 levels in the columns - unstacked = self.ymd.unstack(1).unstack(1) + unstacked = ymd.unstack(1).unstack(1) result = unstacked.stack(1) - expected = self.ymd.unstack() + expected = ymd.unstack() tm.assert_frame_equal(result, expected) result = unstacked.stack(2) - expected = self.ymd.unstack(1) + expected = ymd.unstack(1) tm.assert_frame_equal(result, expected) result = unstacked.stack(0) - expected = self.ymd.stack().unstack(1).unstack(1) + expected = ymd.stack().unstack(1).unstack(1) tm.assert_frame_equal(result, expected) # not all levels present in each 
echelon - unstacked = self.ymd.unstack(2).loc[:, ::3] + unstacked = ymd.unstack(2).loc[:, ::3] stacked = unstacked.stack().stack() - ymd_stacked = self.ymd.stack() + ymd_stacked = ymd.stack() tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index)) # stack with negative number - result = self.ymd.unstack(0).stack(-2) - expected = self.ymd.unstack(0).stack(0) + result = ymd.unstack(0).stack(-2) + expected = ymd.unstack(0).stack(0) # GH10417 def check(left, right): @@ -501,8 +520,10 @@ def test_unstack_odd_failure(self): recons = result.stack() tm.assert_frame_equal(recons, df) - def test_stack_mixed_dtype(self): - df = self.frame.T + def test_stack_mixed_dtype(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + df = frame.T df["foo", "four"] = "foo" df = df.sort_index(level=1, axis=1) @@ -529,20 +550,25 @@ def test_unstack_bug(self): restacked = unstacked.stack() tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float)) - def test_stack_unstack_preserve_names(self): - unstacked = self.frame.unstack() + def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack() assert unstacked.index.name == "first" assert unstacked.columns.names == ["exp", "second"] restacked = unstacked.stack() - assert restacked.index.names == self.frame.index.names + assert restacked.index.names == frame.index.names @pytest.mark.parametrize("method", ["stack", "unstack"]) - def test_stack_unstack_wrong_level_name(self, method): + def test_stack_unstack_wrong_level_name( + self, method, multiindex_dataframe_random_data + ): # GH 18303 - wrong level name should raise + frame = multiindex_dataframe_random_data # A DataFrame with flat axes: - df = self.frame.loc["foo"] + df = frame.loc["foo"] with pytest.raises(KeyError, match="does not match index name"): getattr(df, method)("mistake") @@ -564,29 +590,37 @@ def test_unused_level_raises(self): with pytest.raises(KeyError, match="notevenone"): df["notevenone"] - def test_unstack_level_name(self): - result = self.frame.unstack("second") - expected = self.frame.unstack(level=1) + def test_unstack_level_name(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.unstack("second") + expected = frame.unstack(level=1) tm.assert_frame_equal(result, expected) - def test_stack_level_name(self): - unstacked = self.frame.unstack("second") + def test_stack_level_name(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack("second") result = unstacked.stack("exp") - expected = self.frame.unstack().stack(0) + expected = frame.unstack().stack(0) tm.assert_frame_equal(result, expected) - result = self.frame.stack("exp") - expected = self.frame.stack() + result = frame.stack("exp") + expected = frame.stack() tm.assert_series_equal(result, expected) - def test_stack_unstack_multiple(self): - unstacked = self.ymd.unstack(["year", "month"]) - expected = self.ymd.unstack("year").unstack("month") + def test_stack_unstack_multiple( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + expected = ymd.unstack("year").unstack("month") tm.assert_frame_equal(unstacked, expected) assert unstacked.columns.names == expected.columns.names # series - s = self.ymd["A"] + s = ymd["A"] s_unstacked = s.unstack(["year", "month"]) 
tm.assert_frame_equal(s_unstacked, expected["A"]) @@ -594,28 +628,36 @@ def test_stack_unstack_multiple(self): restacked = restacked.swaplevel(0, 1).swaplevel(1, 2) restacked = restacked.sort_index(level=0) - tm.assert_frame_equal(restacked, self.ymd) - assert restacked.index.names == self.ymd.index.names + tm.assert_frame_equal(restacked, ymd) + assert restacked.index.names == ymd.index.names # GH #451 - unstacked = self.ymd.unstack([1, 2]) - expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how="all") + unstacked = ymd.unstack([1, 2]) + expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all") tm.assert_frame_equal(unstacked, expected) - unstacked = self.ymd.unstack([2, 1]) - expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how="all") + unstacked = ymd.unstack([2, 1]) + expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all") tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns]) - def test_stack_names_and_numbers(self): - unstacked = self.ymd.unstack(["year", "month"]) + def test_stack_names_and_numbers( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) # Can't use mixture of names and numbers to stack with pytest.raises(ValueError, match="level should contain"): unstacked.stack([0, "month"]) - def test_stack_multiple_out_of_bounds(self): + def test_stack_multiple_out_of_bounds( + self, multiindex_year_month_day_dataframe_random_data + ): # nlevels == 3 - unstacked = self.ymd.unstack(["year", "month"]) + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) with pytest.raises(IndexError, match="Too many levels"): unstacked.stack([2, 3]) @@ -783,8 +825,10 @@ def test_unstack_multiple_hierarchical(self): # it works! df.unstack(["b", "c"]) - def test_groupby_transform(self): - s = self.frame["A"] + def test_groupby_transform(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + s = frame["A"] grouper = s.index.get_level_values(0) grouped = s.groupby(grouper) @@ -926,12 +970,14 @@ def test_groupby_level_no_obs(self): result = grouped.sum() assert (result.columns == ["f2", "f3"]).all() - def test_join(self): - a = self.frame.loc[self.frame.index[:5], ["A"]] - b = self.frame.loc[self.frame.index[2:], ["B", "C"]] + def test_join(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + a = frame.loc[frame.index[:5], ["A"]] + b = frame.loc[frame.index[2:], ["B", "C"]] - joined = a.join(b, how="outer").reindex(self.frame.index) - expected = self.frame.copy() + joined = a.join(b, how="outer").reindex(frame.index) + expected = frame.copy() expected.values[np.isnan(joined.values)] = np.nan assert not np.isnan(joined.values).all() @@ -939,12 +985,14 @@ def test_join(self): # TODO what should join do with names ? 
tm.assert_frame_equal(joined, expected, check_names=False) - def test_swaplevel(self): - swapped = self.frame["A"].swaplevel() - swapped2 = self.frame["A"].swaplevel(0) - swapped3 = self.frame["A"].swaplevel(0, 1) - swapped4 = self.frame["A"].swaplevel("first", "second") - assert not swapped.index.equals(self.frame.index) + def test_swaplevel(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + swapped = frame["A"].swaplevel() + swapped2 = frame["A"].swaplevel(0) + swapped3 = frame["A"].swaplevel(0, 1) + swapped4 = frame["A"].swaplevel("first", "second") + assert not swapped.index.equals(frame.index) tm.assert_series_equal(swapped, swapped2) tm.assert_series_equal(swapped, swapped3) tm.assert_series_equal(swapped, swapped4) @@ -953,22 +1001,24 @@ def test_swaplevel(self): back2 = swapped.swaplevel(0) back3 = swapped.swaplevel(0, 1) back4 = swapped.swaplevel("second", "first") - assert back.index.equals(self.frame.index) + assert back.index.equals(frame.index) tm.assert_series_equal(back, back2) tm.assert_series_equal(back, back3) tm.assert_series_equal(back, back4) - ft = self.frame.T + ft = frame.T swapped = ft.swaplevel("first", "second", axis=1) - exp = self.frame.swaplevel("first", "second").T + exp = frame.swaplevel("first", "second").T tm.assert_frame_equal(swapped, exp) msg = "Can only swap levels on a hierarchical axis." with pytest.raises(TypeError, match=msg): DataFrame(range(3)).swaplevel() - def test_insert_index(self): - df = self.ymd[:5].T + def test_insert_index(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + df = ymd[:5].T df[2000, 1, 10] = df[2000, 1, 7] assert isinstance(df.columns, MultiIndex) assert (df[2000, 1, 10] == df[2000, 1, 7]).all() @@ -993,16 +1043,18 @@ def test_alignment(self): exp = x.reindex(exp_index) - y.reindex(exp_index) tm.assert_series_equal(res, exp) - def test_count(self): - frame = self.frame.copy() + def test_count(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + frame = frame.copy() frame.index.names = ["a", "b"] result = frame.count(level="b") - expect = self.frame.count(level=1) + expect = frame.count(level=1) tm.assert_frame_equal(result, expect, check_names=False) result = frame.count(level="a") - expect = self.frame.count(level=0) + expect = frame.count(level=0) tm.assert_frame_equal(result, expect, check_names=False) series = self.series.copy() @@ -1041,17 +1093,21 @@ def test_series_group_min_max(self, op, level, skipna, sort): @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("sort", [True, False]) - def test_frame_group_ops(self, op, level, axis, skipna, sort): + def test_frame_group_ops( + self, op, level, axis, skipna, sort, multiindex_dataframe_random_data + ): # GH 17537 - self.frame.iloc[1, [1, 2]] = np.nan - self.frame.iloc[7, [0, 1]] = np.nan + frame = multiindex_dataframe_random_data - level_name = self.frame.index.names[level] + frame.iloc[1, [1, 2]] = np.nan + frame.iloc[7, [0, 1]] = np.nan + + level_name = frame.index.names[level] if axis == 0: - frame = self.frame + frame = frame else: - frame = self.frame.T + frame = frame.T grouped = frame.groupby(level=level, axis=axis, sort=sort) @@ -1134,28 +1190,34 @@ def test_std_var_pass_ddof(self): expected = df.groupby(level=0).agg(alt) tm.assert_frame_equal(result, expected) - def test_frame_series_agg_multiple_levels(self): - result = self.ymd.sum(level=["year", "month"]) - 
expected = self.ymd.groupby(level=["year", "month"]).sum() + def test_frame_series_agg_multiple_levels( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.sum(level=["year", "month"]) + expected = ymd.groupby(level=["year", "month"]).sum() tm.assert_frame_equal(result, expected) - result = self.ymd["A"].sum(level=["year", "month"]) - expected = self.ymd["A"].groupby(level=["year", "month"]).sum() + result = ymd["A"].sum(level=["year", "month"]) + expected = ymd["A"].groupby(level=["year", "month"]).sum() tm.assert_series_equal(result, expected) - def test_groupby_multilevel(self): - result = self.ymd.groupby(level=[0, 1]).mean() + def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data - k1 = self.ymd.index.get_level_values(0) - k2 = self.ymd.index.get_level_values(1) + result = ymd.groupby(level=[0, 1]).mean() - expected = self.ymd.groupby([k1, k2]).mean() + k1 = ymd.index.get_level_values(0) + k2 = ymd.index.get_level_values(1) + + expected = ymd.groupby([k1, k2]).mean() # TODO groupby with level_values drops names tm.assert_frame_equal(result, expected, check_names=False) - assert result.index.names == self.ymd.index.names[:2] + assert result.index.names == ymd.index.names[:2] - result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean() + result2 = ymd.groupby(level=ymd.index.names[:2]).mean() tm.assert_frame_equal(result, result2) def test_groupby_multilevel_with_transform(self): @@ -1169,23 +1231,28 @@ def test_multilevel_consolidate(self): df["Totals", ""] = df.sum(1) df = df._consolidate() - def test_loc_preserve_names(self): - result = self.ymd.loc[2000] - result2 = self.ymd["A"].loc[2000] - assert result.index.names == self.ymd.index.names[1:] - assert result2.index.names == self.ymd.index.names[1:] + def test_loc_preserve_names(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.loc[2000] + result2 = ymd["A"].loc[2000] + assert result.index.names == ymd.index.names[1:] + assert result2.index.names == ymd.index.names[1:] - result = self.ymd.loc[2000, 2] - result2 = self.ymd["A"].loc[2000, 2] - assert result.index.name == self.ymd.index.names[2] - assert result2.index.name == self.ymd.index.names[2] + result = ymd.loc[2000, 2] + result2 = ymd["A"].loc[2000, 2] + assert result.index.name == ymd.index.names[2] + assert result2.index.name == ymd.index.names[2] - def test_unstack_preserve_types(self): + def test_unstack_preserve_types( + self, multiindex_year_month_day_dataframe_random_data + ): # GH #403 - self.ymd["E"] = "foo" - self.ymd["F"] = 2 + ymd = multiindex_year_month_day_dataframe_random_data + ymd["E"] = "foo" + ymd["F"] = 2 - unstacked = self.ymd.unstack("month") + unstacked = ymd.unstack("month") assert unstacked["A", 1].dtype == np.float64 assert unstacked["E", 1].dtype == np.object_ assert unstacked["F", 1].dtype == np.float64 @@ -1227,10 +1294,12 @@ def test_unstack_group_index_overflow(self): result = s.unstack(4) assert result.shape == (500, 2) - def test_to_html(self): - self.ymd.columns.name = "foo" - self.ymd.to_html() - self.ymd.T.to_html() + def test_to_html(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + ymd.columns.name = "foo" + ymd.to_html() + ymd.T.to_html() def test_level_with_tuples(self): index = MultiIndex( @@ -1305,21 +1374,23 @@ def 
test_mixed_depth_pop(self): tm.assert_frame_equal(expected, result) tm.assert_frame_equal(df1, df2) - def test_reindex_level_partial_selection(self): - result = self.frame.reindex(["foo", "qux"], level=0) - expected = self.frame.iloc[[0, 1, 2, 7, 8, 9]] + def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.reindex(["foo", "qux"], level=0) + expected = frame.iloc[[0, 1, 2, 7, 8, 9]] tm.assert_frame_equal(result, expected) - result = self.frame.T.reindex(["foo", "qux"], axis=1, level=0) + result = frame.T.reindex(["foo", "qux"], axis=1, level=0) tm.assert_frame_equal(result, expected.T) - result = self.frame.loc[["foo", "qux"]] + result = frame.loc[["foo", "qux"]] tm.assert_frame_equal(result, expected) - result = self.frame["A"].loc[["foo", "qux"]] + result = frame["A"].loc[["foo", "qux"]] tm.assert_series_equal(result, expected["A"]) - result = self.frame.T.loc[:, ["foo", "qux"]] + result = frame.T.loc[:, ["foo", "qux"]] tm.assert_frame_equal(result, expected.T) def test_unicode_repr_level_names(self): @@ -1742,9 +1813,11 @@ def test_subsets_multiindex_dtype(self): class TestSorted(Base): """ everything you wanted to test about sorting """ - def test_sort_index_preserve_levels(self): - result = self.frame.sort_index() - assert result.index.names == self.frame.index.names + def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.sort_index() + assert result.index.names == frame.index.names def test_sorting_repr_8017(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/37255
2020-10-19T16:49:34Z
2020-10-20T02:09:11Z
2020-10-20T02:09:11Z
2020-10-20T02:09:23Z
fixed spelling errors in whatsnew, userguide, and ecosystem
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 25ca77627ef39..7878e2b67873a 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -230,7 +230,7 @@ allows users to view, manipulate and edit pandas ``Index``, ``Series``, and ``DataFrame`` objects like a "spreadsheet", including copying and modifying values, sorting, displaying a "heatmap", converting data types and more. pandas objects can also be renamed, duplicated, new columns added, -copyed/pasted to/from the clipboard (as TSV), and saved/loaded to/from a file. +copied/pasted to/from the clipboard (as TSV), and saved/loaded to/from a file. Spyder can also import data from a variety of plain text and binary files or the clipboard into a new pandas DataFrame via a sophisticated import wizard. diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst index bb2aa166419b4..194bb61f2c1c8 100644 --- a/doc/source/whatsnew/v0.16.2.rst +++ b/doc/source/whatsnew/v0.16.2.rst @@ -147,7 +147,7 @@ Bug fixes - Bug in ``setitem`` where type promotion is applied to the entire block (:issue:`10280`) - Bug in ``Series`` arithmetic methods may incorrectly hold names (:issue:`10068`) - Bug in ``GroupBy.get_group`` when grouping on multiple keys, one of which is categorical. (:issue:`10132`) -- Bug in ``DatetimeIndex`` and ``TimedeltaIndex`` names are lost after timedelta arithmetics ( :issue:`9926`) +- Bug in ``DatetimeIndex`` and ``TimedeltaIndex`` names are lost after timedelta arithmetic ( :issue:`9926`) - Bug in ``DataFrame`` construction from nested ``dict`` with ``datetime64`` (:issue:`10160`) - Bug in ``Series`` construction from ``dict`` with ``datetime64`` keys (:issue:`9456`) - Bug in ``Series.plot(label="LABEL")`` not correctly setting the label (:issue:`10119`) diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst index 1918a1e8caf6c..99d642c379921 100644 --- a/doc/source/whatsnew/v0.24.1.rst +++ b/doc/source/whatsnew/v0.24.1.rst @@ -1,6 +1,6 @@ .. _whatsnew_0241: -Whats new in 0.24.1 (February 3, 2019) +What's new in 0.24.1 (February 3, 2019) -------------------------------------- .. warning:: diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index 27e84bf0a7cd7..f8f919664ed25 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -1,6 +1,6 @@ .. _whatsnew_0242: -Whats new in 0.24.2 (March 12, 2019) +What's new in 0.24.2 (March 12, 2019) ------------------------------------ .. warning:: diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 874caee23dae6..fd6e7c8e9cc02 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -180,7 +180,7 @@ Alternatively, you can also use the dtype object: .. warning:: Experimental: the new floating data types are currently experimental, and its - behaviour or API may still change without warning. Expecially the behaviour + behaviour or API may still change without warning. Especially the behaviour regarding NaN (distinct from NA missing values) is subject to change. .. _whatsnew_120.index_name_preservation:
Hi there, I fixed the spelling errors mentioned in https://github.com/pandas-dev/pandas/issues/37252. The two "wit" spelling errors were confusing, and I could not understand the sentences. This is my first pull request, so please excuse my ignorance if I did something incorrectly. Thanks.
https://api.github.com/repos/pandas-dev/pandas/pulls/37253
2020-10-19T15:54:27Z
2020-10-19T18:07:27Z
2020-10-19T18:07:27Z
2020-10-19T18:07:35Z
DOC: update PeriodArray docstring
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index ba2048a496ef8..c77350d5f54bf 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -67,7 +67,9 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps): """ Pandas ExtensionArray for storing Period data. - Users should use :func:`period_array` to create new instances. + Users should use :func:`period_range` to create new instances. + Alternatively, :func:`array` can be used to create new instances + from a sequence of Period scalars. Parameters ---------- @@ -97,8 +99,10 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps): See Also -------- - period_array : Create a new PeriodArray. + Period: Represents a period of time. PeriodIndex : Immutable Index for period data. + period_range: Create a fixed-frequency PeriodArray. + array: Construct a pandas array. Notes -----
- [x] closes #37237 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
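For illustration, a short sketch of the two construction paths the revised docstring points to (the dates, length, and frequency are arbitrary):

```python
import pandas as pd

# period_range builds a fixed-frequency PeriodIndex backed by a PeriodArray:
arr = pd.period_range("2000-01", periods=3, freq="M").array

# pd.array infers a PeriodDtype from a sequence of Period scalars:
arr2 = pd.array([pd.Period("2000-01", freq="M"), pd.Period("2000-02", freq="M")])

print(type(arr).__name__, type(arr2).__name__)  # PeriodArray PeriodArray
```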
https://api.github.com/repos/pandas-dev/pandas/pulls/37251
2020-10-19T14:27:37Z
2020-10-21T18:28:26Z
2020-10-21T18:28:26Z
2020-10-21T18:30:32Z
DOC: Replace pandas on Ray in ecosystem.rst with Modin
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 25ca77627ef39..4654fcf5a6165 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -376,6 +376,23 @@ Dask-ML enables parallel and distributed machine learning using Dask alongside e Koalas provides a familiar pandas DataFrame interface on top of Apache Spark. It enables users to leverage multi-cores on one machine or a cluster of machines to speed up or scale their DataFrame code. +`Modin <https://github.com/modin-project/modin>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``modin.pandas`` DataFrame is a parallel and distributed drop-in replacement +for pandas. This means that you can use Modin with existing pandas code or write +new code with the existing pandas API. Modin can leverage your entire machine or +cluster to speed up and scale your pandas workloads, including traditionally +time-consuming tasks like ingesting data (``read_csv``, ``read_excel``, +``read_parquet``, etc.). + +.. code:: python + + # import pandas as pd + import modin.pandas as pd + + df = pd.read_csv("big.csv") # use all your cores! + `Odo <http://odo.pydata.org>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -400,16 +417,6 @@ If also displays progress bars. # df.apply(func) df.parallel_apply(func) -`Ray <https://ray.readthedocs.io/en/latest/pandas_on_ray.html>`__ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -pandas on Ray is an early stage DataFrame library that wraps pandas and transparently distributes the data and computation. The user does not need to know how many cores their system has, nor do they need to specify how to distribute the data. In fact, users can continue using their previous pandas notebooks while experiencing a considerable speedup from pandas on Ray, even on a single machine. Only a modification of the import statement is needed, as we demonstrate below. Once you’ve changed your import statement, you’re ready to use pandas on Ray just like you would pandas. - -.. code:: python - - # import pandas as pd - import ray.dataframe as pd - `Vaex <https://docs.vaex.io/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Resolves #37247 Signed-off-by: Devin Petersohn <devin.petersohn@gmail.com> - [x] closes #37247 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37249
2020-10-19T14:16:46Z
2020-10-20T00:35:17Z
2020-10-20T00:35:16Z
2020-11-17T17:17:49Z
CI move code directives to pre-commit, remove some outdated checks
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3e1222b7be277..df10e35288144 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -62,6 +62,11 @@ repos: |math|module|note|raw|seealso|toctree|versionadded |versionchanged|warning):[^:] files: \.(py|pyx|rst)$ + - id: incorrect-code-directives + name: Check for incorrect code block or IPython directives + language: pygrep + entry: (\.\. code-block ::|\.\. ipython ::) + files: \.(py|pyx|rst)$ - repo: https://github.com/asottile/yesqa rev: v1.2.2 hooks: diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 37d52506ae6ae..cb34652e11cd3 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -207,18 +207,6 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -r -E --include '*.py' '(unittest(\.| import )mock|mock\.Mock\(\)|mock\.patch)' pandas/tests/ RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check for wrong space after code-block directive and before colon (".. code-block ::" instead of ".. code-block::")' ; echo $MSG - invgrep -R --include="*.rst" ".. code-block ::" doc/source - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Check for wrong space after ipython directive and before colon (".. ipython ::" instead of ".. ipython::")' ; echo $MSG - invgrep -R --include="*.rst" ".. ipython ::" doc/source - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Check for extra blank lines after the class definition' ; echo $MSG - invgrep -R --include="*.py" --include="*.pyx" -E 'class.*:\n\n( )+"""' . - RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check for use of {foo!r} instead of {repr(foo)}' ; echo $MSG invgrep -R --include=*.{py,pyx} '!r}' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" @@ -243,12 +231,6 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include=*.{py,pyx} '\.__class__' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check that no file in the repo contains trailing whitespaces' ; echo $MSG - INVGREP_APPEND=" <- trailing whitespaces found" - invgrep -RI --exclude=\*.{svg,c,cpp,html,js} --exclude-dir=env "\s$" * - RET=$(($RET + $?)) ; echo $MSG "DONE" - unset INVGREP_APPEND - MSG='Check code for instances of os.remove' ; echo $MSG invgrep -R --include="*.py*" --exclude "common.py" --exclude "test_writers.py" --exclude "test_store.py" -E "os\.remove" pandas/tests/ RET=$(($RET + $?)) ; echo $MSG "DONE"
Moving checks for incorrect code block or IPython directives to pre-commit so they're cross-platform and give faster feedback to devs. Also removing some no-longer-necessary code checks: - "Check that no file in the repo contains trailing whitespaces" is taken care of by the `trailing-whitespace` hook - "Check for extra blank lines after the class definition" is enforced by `black`
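As a quick sanity check, a `pygrep` entry is just an ordinary Python regex run over the matching files, so it can be tried out locally before wiring it into the hook — a minimal sketch (the sample lines below are made up for illustration):

```python
import re

# Same pattern as the new pre-commit entry: flags a stray space
# before the double colon in code-block / ipython directives.
pattern = re.compile(r"(\.\. code-block ::|\.\. ipython ::)")

samples = [
    ".. code-block:: python",   # correct -> not flagged
    ".. code-block :: python",  # incorrect -> flagged
    ".. ipython :: python",     # incorrect -> flagged
]

for line in samples:
    status = "FLAGGED" if pattern.search(line) else "ok"
    print(f"{line!r}: {status}")
```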
https://api.github.com/repos/pandas-dev/pandas/pulls/37241
2020-10-19T10:15:32Z
2020-10-20T01:08:38Z
2020-10-20T01:08:38Z
2020-10-20T07:39:53Z
CI move non-standard-import checks over to pre-commit
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index df10e35288144..e7738fb9a2979 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -56,12 +56,44 @@ repos: - id: incorrect-sphinx-directives name: Check for incorrect Sphinx directives language: pygrep - entry: >- - \.\. (autosummary|contents|currentmodule|deprecated - |function|image|important|include|ipython|literalinclude - |math|module|note|raw|seealso|toctree|versionadded - |versionchanged|warning):[^:] + entry: | + (?x) + # Check for cases of e.g. .. warning: instead of .. warning:: + \.\.\ ( + autosummary|contents|currentmodule|deprecated| + function|image|important|include|ipython|literalinclude| + math|module|note|raw|seealso|toctree|versionadded| + versionchanged|warning + ):[^:] files: \.(py|pyx|rst)$ + - id: non-standard-imports + name: Check for non-standard imports + language: pygrep + entry: | + (?x) + # Check for imports from pandas.core.common instead of `import pandas.core.common as com` + from\ pandas\.core\.common\ import| + from\ pandas\.core\ import\ common| + + # Check for imports from collections.abc instead of `from collections import abc` + from\ collections\.abc\ import| + + from\ numpy\ import\ nan + types: [python] + - id: non-standard-imports-in-tests + name: Check for non-standard imports in test suite + language: pygrep + entry: | + (?x) + # Check for imports from pandas._testing instead of `import pandas._testing as tm` + from\ pandas\._testing\ import| + from\ pandas\ import\ _testing\ as\ tm| + + # No direct imports from conftest + conftest\ import| + import\ conftest + types: [python] + files: ^pandas/tests/ - id: incorrect-code-directives name: Check for incorrect code block or IPython directives language: pygrep diff --git a/ci/code_checks.sh b/ci/code_checks.sh index f01cd9ba01470..926e90f3dfa0c 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -110,31 +110,6 @@ fi ### PATTERNS ### if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then - # Check for imports from pandas.core.common instead of `import pandas.core.common as com` - # Check for imports from collections.abc instead of `from collections import abc` - MSG='Check for non-standard imports' ; echo $MSG - invgrep -R --include="*.py*" -E "from pandas.core.common import" pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - invgrep -R --include="*.py*" -E "from pandas.core import common" pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - invgrep -R --include="*.py*" -E "from collections.abc import" pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - invgrep -R --include="*.py*" -E "from numpy import nan" pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - - # Checks for test suite - # Check for imports from pandas._testing instead of `import pandas._testing as tm` - invgrep -R --include="*.py*" -E "from pandas._testing import" pandas/tests - RET=$(($RET + $?)) ; echo $MSG "DONE" - invgrep -R --include="*.py*" -E "from pandas import _testing as tm" pandas/tests - RET=$(($RET + $?)) ; echo $MSG "DONE" - - # No direct imports from conftest - invgrep -R --include="*.py*" -E "conftest import" pandas/tests - RET=$(($RET + $?)) ; echo $MSG "DONE" - invgrep -R --include="*.py*" -E "import conftest" pandas/tests - RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check for use of exec' ; echo $MSG invgrep -R --include="*.py*" -E "[^a-zA-Z0-9_]exec\(" pandas RET=$(($RET + $?)) ; echo $MSG "DONE"
This way the checks are cross-platform, give faster feedback to devs, and can use pre-commit's built-in `pygrep` language instead of the custom `invgrep` shell helper.
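One detail worth calling out: the multi-line entries start with `(?x)` (verbose mode), so unescaped whitespace in the pattern is ignored and literal spaces have to be written as `\ `. A minimal sketch of checking the pattern locally (the sample import lines are made up):

```python
import re

# Mirrors the non-standard-imports entry: in verbose mode unescaped
# whitespace is ignored, hence the escaped "\ " between words.
pattern = re.compile(
    r"""(?x)
    from\ pandas\.core\.common\ import|
    from\ pandas\.core\ import\ common|
    from\ collections\.abc\ import|
    from\ numpy\ import\ nan
    """
)

assert pattern.search("from numpy import nan")             # flagged
assert not pattern.search("from collections import abc")   # preferred form
```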
https://api.github.com/repos/pandas-dev/pandas/pulls/37240
2020-10-19T09:20:36Z
2020-10-22T23:51:37Z
2020-10-22T23:51:37Z
2020-10-23T07:17:22Z
CLN: clean-up test on addition of series/frames
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 04ba41307d0ef..1ec3a6f3beda2 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -815,31 +815,45 @@ class TestAdditionSubtraction: # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__ # for non-timestamp/timedelta/period dtypes - # TODO: This came from series.test.test_operators, needs cleanup - def test_arith_ops_df_compat(self): + @pytest.mark.parametrize( + "first, second, expected", + [ + ( + pd.Series([1, 2, 3], index=list("ABC"), name="x"), + pd.Series([2, 2, 2], index=list("ABD"), name="x"), + pd.Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"), + ), + ( + pd.Series([1, 2, 3], index=list("ABC"), name="x"), + pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x"), + pd.Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"), + ), + ], + ) + def test_add_series(self, first, second, expected): # GH#1134 - s1 = pd.Series([1, 2, 3], index=list("ABC"), name="x") - s2 = pd.Series([2, 2, 2], index=list("ABD"), name="x") - - exp = pd.Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x") - tm.assert_series_equal(s1 + s2, exp) - tm.assert_series_equal(s2 + s1, exp) - - exp = pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD")) - tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp) - tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp) + tm.assert_series_equal(first + second, expected) + tm.assert_series_equal(second + first, expected) - # different length - s3 = pd.Series([1, 2, 3], index=list("ABC"), name="x") - s4 = pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x") - - exp = pd.Series([3, 4, 5, np.nan], index=list("ABCD"), name="x") - tm.assert_series_equal(s3 + s4, exp) - tm.assert_series_equal(s4 + s3, exp) - - exp = pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD")) - tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp) - tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp) + @pytest.mark.parametrize( + "first, second, expected", + [ + ( + pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), + pd.DataFrame({"x": [2, 2, 2]}, index=list("ABD")), + pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD")), + ), + ( + pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), + pd.DataFrame({"x": [2, 2, 2, 2]}, index=list("ABCD")), + pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD")), + ), + ], + ) + def test_add_frames(self, first, second, expected): + # GH#1134 + tm.assert_frame_equal(first + second, expected) + tm.assert_frame_equal(second + first, expected) # TODO: This came from series.test.test_operators, needs cleanup def test_series_frame_radd_bug(self):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Cleanup in ``pandas/tests/arithmetic/test_numeric.py``. - Split ``test_arith_ops_df_compat`` into ``test_add_series`` and ``test_add_frames``. - Parametrize tests.
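The parametrization used here is the standard pytest pattern — a minimal sketch of the same idea (the inputs below are illustrative, not taken from the diff):

```python
import pytest
import pandas as pd
import pandas._testing as tm


@pytest.mark.parametrize(
    "first, second, expected",
    [
        # aligned indexes: plain elementwise addition
        (pd.Series([1, 2]), pd.Series([2, 2]), pd.Series([3, 4])),
    ],
)
def test_add_series(first, second, expected):
    # addition is commutative, so check both orders
    tm.assert_series_equal(first + second, expected)
    tm.assert_series_equal(second + first, expected)
```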
https://api.github.com/repos/pandas-dev/pandas/pulls/37238
2020-10-19T08:09:30Z
2020-10-20T01:22:23Z
2020-10-20T01:22:23Z
2020-10-20T05:47:31Z
TST/CLN: Split out some to_string tests
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 78cb8ccc05077..dd85db19af959 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -158,16 +158,6 @@ def has_expanded_repr(df): @pytest.mark.filterwarnings("ignore::FutureWarning:.*format") class TestDataFrameFormatting: - def test_repr_embedded_ndarray(self): - arr = np.empty(10, dtype=[("err", object)]) - for i in range(len(arr)): - arr["err"][i] = np.random.randn(i) - - df = DataFrame(arr) - repr(df["err"]) - repr(df) - df.to_string() - def test_eng_float_formatter(self, float_frame): df = float_frame df.loc[5] = 0 @@ -204,13 +194,6 @@ def check(null_counts, result): check(True, False) check(False, False) - def test_repr_tuples(self): - buf = StringIO() - - df = DataFrame({"tups": list(zip(range(10), range(10)))}) - repr(df) - df.to_string(col_space=10, buf=buf) - def test_repr_truncation(self): max_len = 20 with option_context("display.max_colwidth", max_len): @@ -534,45 +517,6 @@ def test_str_max_colwidth(self): "1 foo bar stuff 1" ) - def test_to_string_truncate(self): - # GH 9784 - dont truncate when calling DataFrame.to_string - df = pd.DataFrame( - [ - { - "a": "foo", - "b": "bar", - "c": "let's make this a very VERY long line that is longer " - "than the default 50 character limit", - "d": 1, - }, - {"a": "foo", "b": "bar", "c": "stuff", "d": 1}, - ] - ) - df.set_index(["a", "b", "c"]) - assert df.to_string() == ( - " a b " - " c d\n" - "0 foo bar let's make this a very VERY long line t" - "hat is longer than the default 50 character limit 1\n" - "1 foo bar " - " stuff 1" - ) - with option_context("max_colwidth", 20): - # the display option has no effect on the to_string method - assert df.to_string() == ( - " a b " - " c d\n" - "0 foo bar let's make this a very VERY long line t" - "hat is longer than the default 50 character limit 1\n" - "1 foo bar " - " stuff 1" - ) - assert df.to_string(max_colwidth=20) == ( - " a b c d\n" - "0 foo bar let's make this ... 
1\n" - "1 foo bar stuff 1" - ) - def test_auto_detect(self): term_width, term_height = get_terminal_size() fac = 1.05 # Arbitrary large factor to exceed term width @@ -633,95 +577,6 @@ def test_to_string_repr_unicode(self): finally: sys.stdin = _stdin - def test_to_string_unicode_columns(self, float_frame): - df = DataFrame({"\u03c3": np.arange(10.0)}) - - buf = StringIO() - df.to_string(buf=buf) - buf.getvalue() - - buf = StringIO() - df.info(buf=buf) - buf.getvalue() - - result = float_frame.to_string() - assert isinstance(result, str) - - def test_to_string_utf8_columns(self): - n = "\u05d0".encode() - - with option_context("display.max_rows", 1): - df = DataFrame([1, 2], columns=[n]) - repr(df) - - def test_to_string_unicode_two(self): - dm = DataFrame({"c/\u03c3": []}) - buf = StringIO() - dm.to_string(buf) - - def test_to_string_unicode_three(self): - dm = DataFrame(["\xc2"]) - buf = StringIO() - dm.to_string(buf) - - def test_to_string_with_formatters(self): - df = DataFrame( - { - "int": [1, 2, 3], - "float": [1.0, 2.0, 3.0], - "object": [(1, 2), True, False], - }, - columns=["int", "float", "object"], - ) - - formatters = [ - ("int", lambda x: f"0x{x:x}"), - ("float", lambda x: f"[{x: 4.1f}]"), - ("object", lambda x: f"-{x!s}-"), - ] - result = df.to_string(formatters=dict(formatters)) - result2 = df.to_string(formatters=list(zip(*formatters))[1]) - assert result == ( - " int float object\n" - "0 0x1 [ 1.0] -(1, 2)-\n" - "1 0x2 [ 2.0] -True-\n" - "2 0x3 [ 3.0] -False-" - ) - assert result == result2 - - def test_to_string_with_datetime64_monthformatter(self): - months = [datetime(2016, 1, 1), datetime(2016, 2, 2)] - x = DataFrame({"months": months}) - - def format_func(x): - return x.strftime("%Y-%m") - - result = x.to_string(formatters={"months": format_func}) - expected = "months\n0 2016-01\n1 2016-02" - assert result.strip() == expected - - def test_to_string_with_datetime64_hourformatter(self): - - x = DataFrame( - { - "hod": pd.to_datetime( - ["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f" - ) - } - ) - - def format_func(x): - return x.strftime("%H:%M") - - result = x.to_string(formatters={"hod": format_func}) - expected = "hod\n0 10:10\n1 12:12" - assert result.strip() == expected - - def test_to_string_with_formatters_unicode(self): - df = DataFrame({"c/\u03c3": [1, 2, 3]}) - result = df.to_string(formatters={"c/\u03c3": str}) - assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3" - def test_east_asian_unicode_false(self): # not aligned properly because of east asian width @@ -3398,66 +3253,3 @@ def test_filepath_or_buffer_bad_arg_raises(float_frame, method): msg = "buf is not a file name and it has no write method" with pytest.raises(TypeError, match=msg): getattr(float_frame, method)(buf=object()) - - -@pytest.mark.parametrize( - "input_array, expected", - [ - ("a", "a"), - (["a", "b"], "a\nb"), - ([1, "a"], "1\na"), - (1, "1"), - ([0, -1], " 0\n-1"), - (1.0, "1.0"), - ([" a", " b"], " a\n b"), - ([".1", "1"], ".1\n 1"), - (["10", "-10"], " 10\n-10"), - ], -) -def test_format_remove_leading_space_series(input_array, expected): - # GH: 24980 - s = pd.Series(input_array).to_string(index=False) - assert s == expected - - -@pytest.mark.parametrize( - "input_array, expected", - [ - ({"A": ["a"]}, "A\na"), - ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"), - ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"), - ], -) -def test_format_remove_leading_space_dataframe(input_array, expected): - # GH: 24980 - df = pd.DataFrame(input_array).to_string(index=False) - assert 
df == expected - - -def test_to_string_complex_number_trims_zeros(): - s = pd.Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j]) - result = s.to_string() - expected = "0 1.00+1.00j\n1 1.00+1.00j\n2 1.05+1.00j" - assert result == expected - - -def test_nullable_float_to_string(float_ea_dtype): - # https://github.com/pandas-dev/pandas/issues/36775 - dtype = float_ea_dtype - s = pd.Series([0.0, 1.0, None], dtype=dtype) - result = s.to_string() - expected = """0 0.0 -1 1.0 -2 <NA>""" - assert result == expected - - -def test_nullable_int_to_string(any_nullable_int_dtype): - # https://github.com/pandas-dev/pandas/issues/36775 - dtype = any_nullable_int_dtype - s = pd.Series([0, 1, None], dtype=dtype) - result = s.to_string() - expected = """0 0 -1 1 -2 <NA>""" - assert result == expected diff --git a/pandas/tests/io/formats/test_to_string.py b/pandas/tests/io/formats/test_to_string.py new file mode 100644 index 0000000000000..7944a0ea67a5f --- /dev/null +++ b/pandas/tests/io/formats/test_to_string.py @@ -0,0 +1,222 @@ +from datetime import datetime +from io import StringIO + +import numpy as np +import pytest + +from pandas import DataFrame, Series, option_context, to_datetime + + +def test_repr_embedded_ndarray(): + arr = np.empty(10, dtype=[("err", object)]) + for i in range(len(arr)): + arr["err"][i] = np.random.randn(i) + + df = DataFrame(arr) + repr(df["err"]) + repr(df) + df.to_string() + + +def test_repr_tuples(): + buf = StringIO() + + df = DataFrame({"tups": list(zip(range(10), range(10)))}) + repr(df) + df.to_string(col_space=10, buf=buf) + + +def test_to_string_truncate(): + # GH 9784 - dont truncate when calling DataFrame.to_string + df = DataFrame( + [ + { + "a": "foo", + "b": "bar", + "c": "let's make this a very VERY long line that is longer " + "than the default 50 character limit", + "d": 1, + }, + {"a": "foo", "b": "bar", "c": "stuff", "d": 1}, + ] + ) + df.set_index(["a", "b", "c"]) + assert df.to_string() == ( + " a b " + " c d\n" + "0 foo bar let's make this a very VERY long line t" + "hat is longer than the default 50 character limit 1\n" + "1 foo bar " + " stuff 1" + ) + with option_context("max_colwidth", 20): + # the display option has no effect on the to_string method + assert df.to_string() == ( + " a b " + " c d\n" + "0 foo bar let's make this a very VERY long line t" + "hat is longer than the default 50 character limit 1\n" + "1 foo bar " + " stuff 1" + ) + assert df.to_string(max_colwidth=20) == ( + " a b c d\n" + "0 foo bar let's make this ... 
1\n" + "1 foo bar stuff 1" + ) + + +@pytest.mark.parametrize( + "input_array, expected", + [ + ("a", "a"), + (["a", "b"], "a\nb"), + ([1, "a"], "1\na"), + (1, "1"), + ([0, -1], " 0\n-1"), + (1.0, "1.0"), + ([" a", " b"], " a\n b"), + ([".1", "1"], ".1\n 1"), + (["10", "-10"], " 10\n-10"), + ], +) +def test_format_remove_leading_space_series(input_array, expected): + # GH: 24980 + s = Series(input_array).to_string(index=False) + assert s == expected + + +@pytest.mark.parametrize( + "input_array, expected", + [ + ({"A": ["a"]}, "A\na"), + ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"), + ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"), + ], +) +def test_format_remove_leading_space_dataframe(input_array, expected): + # GH: 24980 + df = DataFrame(input_array).to_string(index=False) + assert df == expected + + +def test_to_string_unicode_columns(float_frame): + df = DataFrame({"\u03c3": np.arange(10.0)}) + + buf = StringIO() + df.to_string(buf=buf) + buf.getvalue() + + buf = StringIO() + df.info(buf=buf) + buf.getvalue() + + result = float_frame.to_string() + assert isinstance(result, str) + + +def test_to_string_utf8_columns(): + n = "\u05d0".encode() + + with option_context("display.max_rows", 1): + df = DataFrame([1, 2], columns=[n]) + repr(df) + + +def test_to_string_unicode_two(): + dm = DataFrame({"c/\u03c3": []}) + buf = StringIO() + dm.to_string(buf) + + +def test_to_string_unicode_three(): + dm = DataFrame(["\xc2"]) + buf = StringIO() + dm.to_string(buf) + + +def test_to_string_with_formatters(): + df = DataFrame( + { + "int": [1, 2, 3], + "float": [1.0, 2.0, 3.0], + "object": [(1, 2), True, False], + }, + columns=["int", "float", "object"], + ) + + formatters = [ + ("int", lambda x: f"0x{x:x}"), + ("float", lambda x: f"[{x: 4.1f}]"), + ("object", lambda x: f"-{x!s}-"), + ] + result = df.to_string(formatters=dict(formatters)) + result2 = df.to_string(formatters=list(zip(*formatters))[1]) + assert result == ( + " int float object\n" + "0 0x1 [ 1.0] -(1, 2)-\n" + "1 0x2 [ 2.0] -True-\n" + "2 0x3 [ 3.0] -False-" + ) + assert result == result2 + + +def test_to_string_with_datetime64_monthformatter(): + months = [datetime(2016, 1, 1), datetime(2016, 2, 2)] + x = DataFrame({"months": months}) + + def format_func(x): + return x.strftime("%Y-%m") + + result = x.to_string(formatters={"months": format_func}) + expected = "months\n0 2016-01\n1 2016-02" + assert result.strip() == expected + + +def test_to_string_with_datetime64_hourformatter(): + + x = DataFrame( + {"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")} + ) + + def format_func(x): + return x.strftime("%H:%M") + + result = x.to_string(formatters={"hod": format_func}) + expected = "hod\n0 10:10\n1 12:12" + assert result.strip() == expected + + +def test_to_string_with_formatters_unicode(): + df = DataFrame({"c/\u03c3": [1, 2, 3]}) + result = df.to_string(formatters={"c/\u03c3": str}) + assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3" + + +def test_to_string_complex_number_trims_zeros(): + s = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j]) + result = s.to_string() + expected = "0 1.00+1.00j\n1 1.00+1.00j\n2 1.05+1.00j" + assert result == expected + + +def test_nullable_float_to_string(float_ea_dtype): + # https://github.com/pandas-dev/pandas/issues/36775 + dtype = float_ea_dtype + s = Series([0.0, 1.0, None], dtype=dtype) + result = s.to_string() + expected = """0 0.0 +1 1.0 +2 <NA>""" + assert result == expected + + +def test_nullable_int_to_string(any_nullable_int_dtype): + # 
https://github.com/pandas-dev/pandas/issues/36775 + dtype = any_nullable_int_dtype + s = Series([0, 1, None], dtype=dtype) + result = s.to_string() + expected = """0 0 +1 1 +2 <NA>""" + assert result == expected
test_format.py is over 3000 lines long, so this breaks some (but not all) of the to_string-specific tests out into a separate test_to_string.py file (we already have test_to_html.py, test_to_latex.py, and test_to_markdown.py).
https://api.github.com/repos/pandas-dev/pandas/pulls/37234
2020-10-19T02:06:46Z
2020-10-20T23:02:57Z
2020-10-20T23:02:57Z
2020-10-20T23:05:39Z
REF: collect tests by method
diff --git a/pandas/tests/frame/methods/test_matmul.py b/pandas/tests/frame/methods/test_matmul.py new file mode 100644 index 0000000000000..c34bf991ffc4c --- /dev/null +++ b/pandas/tests/frame/methods/test_matmul.py @@ -0,0 +1,82 @@ +import operator + +import numpy as np +import pytest + +from pandas import DataFrame, Index, Series +import pandas._testing as tm + + +class TestMatMul: + def test_matmul(self): + # matmul test is for GH#10259 + a = DataFrame( + np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] + ) + b = DataFrame( + np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] + ) + + # DataFrame @ DataFrame + result = operator.matmul(a, b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_frame_equal(result, expected) + + # DataFrame @ Series + result = operator.matmul(a, b.one) + expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + # np.array @ DataFrame + result = operator.matmul(a.values, b) + assert isinstance(result, DataFrame) + assert result.columns.equals(b.columns) + assert result.index.equals(Index(range(3))) + expected = np.dot(a.values, b.values) + tm.assert_almost_equal(result.values, expected) + + # nested list @ DataFrame (__rmatmul__) + result = operator.matmul(a.values.tolist(), b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_almost_equal(result.values, expected.values) + + # mixed dtype DataFrame @ DataFrame + a["q"] = a.q.round().astype(int) + result = operator.matmul(a, b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_frame_equal(result, expected) + + # different dtypes DataFrame @ DataFrame + a = a.astype(int) + result = operator.matmul(a, b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_frame_equal(result, expected) + + # unaligned + df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) + df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) + + with pytest.raises(ValueError, match="aligned"): + operator.matmul(df, df2) + + def test_matmul_message_shapes(self): + # GH#21581 exception message should reflect original shapes, + # not transposed shapes + a = np.random.rand(10, 4) + b = np.random.rand(5, 3) + + df = DataFrame(b) + + msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" + with pytest.raises(ValueError, match=msg): + a @ df + with pytest.raises(ValueError, match=msg): + a.tolist() @ df diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index ee136533b0775..9dab5f509bc75 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1,6 +1,5 @@ from datetime import timedelta from decimal import Decimal -import operator import numpy as np import pytest @@ -1115,82 +1114,6 @@ def test_any_all_level_axis_none_raises(self, method): with pytest.raises(ValueError, match=xpr): getattr(df, method)(axis=None, level="out") - # --------------------------------------------------------------------- - # Matrix-like - - def test_matmul(self): - # matmul test is for GH 10259 - a = DataFrame( - np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] - ) - b = DataFrame( - np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] - ) - - # 
DataFrame @ DataFrame - result = operator.matmul(a, b) - expected = DataFrame( - np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] - ) - tm.assert_frame_equal(result, expected) - - # DataFrame @ Series - result = operator.matmul(a, b.one) - expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) - tm.assert_series_equal(result, expected) - - # np.array @ DataFrame - result = operator.matmul(a.values, b) - assert isinstance(result, DataFrame) - assert result.columns.equals(b.columns) - assert result.index.equals(pd.Index(range(3))) - expected = np.dot(a.values, b.values) - tm.assert_almost_equal(result.values, expected) - - # nested list @ DataFrame (__rmatmul__) - result = operator.matmul(a.values.tolist(), b) - expected = DataFrame( - np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] - ) - tm.assert_almost_equal(result.values, expected.values) - - # mixed dtype DataFrame @ DataFrame - a["q"] = a.q.round().astype(int) - result = operator.matmul(a, b) - expected = DataFrame( - np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] - ) - tm.assert_frame_equal(result, expected) - - # different dtypes DataFrame @ DataFrame - a = a.astype(int) - result = operator.matmul(a, b) - expected = DataFrame( - np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] - ) - tm.assert_frame_equal(result, expected) - - # unaligned - df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) - df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) - - with pytest.raises(ValueError, match="aligned"): - operator.matmul(df, df2) - - def test_matmul_message_shapes(self): - # GH#21581 exception message should reflect original shapes, - # not transposed shapes - a = np.random.rand(10, 4) - b = np.random.rand(5, 3) - - df = DataFrame(b) - - msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" - with pytest.raises(ValueError, match=msg): - a @ df - with pytest.raises(ValueError, match=msg): - a.tolist() @ df - # --------------------------------------------------------------------- # Unsorted diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index c378194b9e2b2..37a4d7ffcf04f 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -31,10 +31,3 @@ def test_frame_setitem(self): rs = df.reset_index().set_index("index") assert isinstance(rs.index, PeriodIndex) tm.assert_index_equal(rs.index, rng) - - def test_frame_index_to_string(self): - index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") - frame = DataFrame(np.random.randn(3, 4), index=index) - - # it works! - frame.to_string() diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 6d786d9580542..641331d73ff7a 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -8,6 +8,7 @@ from pandas import ( Categorical, DataFrame, + PeriodIndex, Series, date_range, option_context, @@ -218,3 +219,10 @@ def test_frame_datetime64_pre1900_repr(self): df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")}) # it works! repr(df) + + def test_frame_to_string_with_periodindex(self): + index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") + frame = DataFrame(np.random.randn(3, 4), index=index) + + # it works! 
+ frame.to_string() diff --git a/pandas/tests/series/methods/test_matmul.py b/pandas/tests/series/methods/test_matmul.py new file mode 100644 index 0000000000000..c311f1fd880a3 --- /dev/null +++ b/pandas/tests/series/methods/test_matmul.py @@ -0,0 +1,75 @@ +import operator + +import numpy as np +import pytest + +from pandas import DataFrame, Series +import pandas._testing as tm + + +class TestMatmul: + def test_matmul(self): + # matmul test is for GH#10259 + a = Series(np.random.randn(4), index=["p", "q", "r", "s"]) + b = DataFrame( + np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"] + ).T + + # Series @ DataFrame -> Series + result = operator.matmul(a, b) + expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"]) + tm.assert_series_equal(result, expected) + + # DataFrame @ Series -> Series + result = operator.matmul(b.T, a) + expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) + tm.assert_series_equal(result, expected) + + # Series @ Series -> scalar + result = operator.matmul(a, a) + expected = np.dot(a.values, a.values) + tm.assert_almost_equal(result, expected) + + # GH#21530 + # vector (1D np.array) @ Series (__rmatmul__) + result = operator.matmul(a.values, a) + expected = np.dot(a.values, a.values) + tm.assert_almost_equal(result, expected) + + # GH#21530 + # vector (1D list) @ Series (__rmatmul__) + result = operator.matmul(a.values.tolist(), a) + expected = np.dot(a.values, a.values) + tm.assert_almost_equal(result, expected) + + # GH#21530 + # matrix (2D np.array) @ Series (__rmatmul__) + result = operator.matmul(b.T.values, a) + expected = np.dot(b.T.values, a.values) + tm.assert_almost_equal(result, expected) + + # GH#21530 + # matrix (2D nested lists) @ Series (__rmatmul__) + result = operator.matmul(b.T.values.tolist(), a) + expected = np.dot(b.T.values, a.values) + tm.assert_almost_equal(result, expected) + + # mixed dtype DataFrame @ Series + a["p"] = int(a.p) + result = operator.matmul(b.T, a) + expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) + tm.assert_series_equal(result, expected) + + # different dtypes DataFrame @ Series + a = a.astype(int) + result = operator.matmul(b.T, a) + expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) + tm.assert_series_equal(result, expected) + + msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)" + # exception raised is of type Exception + with pytest.raises(Exception, match=msg): + a.dot(a.values[:3]) + msg = "matrices are not aligned" + with pytest.raises(ValueError, match=msg): + a.dot(b.T) diff --git a/pandas/tests/series/methods/test_repeat.py b/pandas/tests/series/methods/test_repeat.py new file mode 100644 index 0000000000000..b8e01e79ea02f --- /dev/null +++ b/pandas/tests/series/methods/test_repeat.py @@ -0,0 +1,30 @@ +import numpy as np +import pytest + +from pandas import Series +import pandas._testing as tm + + +class TestRepeat: + def test_repeat(self): + ser = Series(np.random.randn(3), index=["a", "b", "c"]) + + reps = ser.repeat(5) + exp = Series(ser.values.repeat(5), index=ser.index.values.repeat(5)) + tm.assert_series_equal(reps, exp) + + to_rep = [2, 3, 4] + reps = ser.repeat(to_rep) + exp = Series(ser.values.repeat(to_rep), index=ser.index.values.repeat(to_rep)) + tm.assert_series_equal(reps, exp) + + def test_numpy_repeat(self): + ser = Series(np.arange(3), name="x") + expected = Series( + ser.values.repeat(2), name="x", index=ser.index.values.repeat(2) + ) + tm.assert_series_equal(np.repeat(ser, 2), expected) + + 
msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.repeat(ser, 2, axis=0) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 1a469d3e3d88b..527feb6537e75 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1,80 +1,10 @@ -import operator - import numpy as np -import pytest import pandas as pd -from pandas import DataFrame, Series -import pandas._testing as tm +from pandas import Series class TestSeriesAnalytics: - def test_matmul(self): - # matmul test is for GH #10259 - a = Series(np.random.randn(4), index=["p", "q", "r", "s"]) - b = DataFrame( - np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"] - ).T - - # Series @ DataFrame -> Series - result = operator.matmul(a, b) - expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"]) - tm.assert_series_equal(result, expected) - - # DataFrame @ Series -> Series - result = operator.matmul(b.T, a) - expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) - tm.assert_series_equal(result, expected) - - # Series @ Series -> scalar - result = operator.matmul(a, a) - expected = np.dot(a.values, a.values) - tm.assert_almost_equal(result, expected) - - # GH 21530 - # vector (1D np.array) @ Series (__rmatmul__) - result = operator.matmul(a.values, a) - expected = np.dot(a.values, a.values) - tm.assert_almost_equal(result, expected) - - # GH 21530 - # vector (1D list) @ Series (__rmatmul__) - result = operator.matmul(a.values.tolist(), a) - expected = np.dot(a.values, a.values) - tm.assert_almost_equal(result, expected) - - # GH 21530 - # matrix (2D np.array) @ Series (__rmatmul__) - result = operator.matmul(b.T.values, a) - expected = np.dot(b.T.values, a.values) - tm.assert_almost_equal(result, expected) - - # GH 21530 - # matrix (2D nested lists) @ Series (__rmatmul__) - result = operator.matmul(b.T.values.tolist(), a) - expected = np.dot(b.T.values, a.values) - tm.assert_almost_equal(result, expected) - - # mixed dtype DataFrame @ Series - a["p"] = int(a.p) - result = operator.matmul(b.T, a) - expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) - tm.assert_series_equal(result, expected) - - # different dtypes DataFrame @ Series - a = a.astype(int) - result = operator.matmul(b.T, a) - expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) - tm.assert_series_equal(result, expected) - - msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)" - # exception raised is of type Exception - with pytest.raises(Exception, match=msg): - a.dot(a.values[:3]) - msg = "matrices are not aligned" - with pytest.raises(ValueError, match=msg): - a.dot(b.T) - def test_ptp(self): # GH21614 N = 1000 @@ -82,27 +12,6 @@ def test_ptp(self): ser = Series(arr) assert np.ptp(ser) == np.ptp(arr) - def test_repeat(self): - s = Series(np.random.randn(3), index=["a", "b", "c"]) - - reps = s.repeat(5) - exp = Series(s.values.repeat(5), index=s.index.values.repeat(5)) - tm.assert_series_equal(reps, exp) - - to_rep = [2, 3, 4] - reps = s.repeat(to_rep) - exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep)) - tm.assert_series_equal(reps, exp) - - def test_numpy_repeat(self): - s = Series(np.arange(3), name="x") - expected = Series(s.values.repeat(2), name="x", index=s.index.values.repeat(2)) - tm.assert_series_equal(np.repeat(s, 2), expected) - - msg = "the 'axis' parameter is not supported" - with pytest.raises(ValueError, match=msg): - 
np.repeat(s, 2, axis=0) - def test_is_monotonic(self): s = Series(np.random.randint(0, 10, size=1000))
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37233
2020-10-19T00:02:37Z
2020-10-19T13:01:17Z
2020-10-19T13:01:17Z
2020-10-19T16:31:22Z